diff --git a/package/boot/uboot-envtools/files/alpine b/package/boot/uboot-envtools/files/alpine new file mode 100644 index 00000000000000..0df5401c2630b8 --- /dev/null +++ b/package/boot/uboot-envtools/files/alpine @@ -0,0 +1,26 @@ +#!/bin/sh +# +# Copyright (C) 2016 LEDE +# + +[ -e /etc/config/ubootenv ] && exit 0 + +touch /etc/config/ubootenv + +. /lib/uboot-envtools.sh +. /lib/functions.sh + +board=$(board_name) + +case "$board" in +netgear,r9000|\ +netgear,xr700) + ubootenv_add_uci_config "/dev/mtd1" "0x00000" "0x4000" "0x20000" + ubootenv_add_uci_config "/dev/mtd1" "0x20000" "0x4000" "0x20000" + ;; +esac + +config_load ubootenv +config_foreach ubootenv_add_app_config ubootenv + +exit 0 diff --git a/package/kernel/linux/modules/crypto.mk b/package/kernel/linux/modules/crypto.mk index ce221635470302..164fcd58266f01 --- a/package/kernel/linux/modules/crypto.mk +++ b/package/kernel/linux/modules/crypto.mk @@ -357,6 +357,22 @@ endef $(eval $(call KernelPackage,crypto-hmac)) +define KernelPackage/crypto-hw-alpine + TITLE:=Alpine hardware crypto module + DEPENDS:=+kmod-crypto-manager + KCONFIG:= \ + CONFIG_CRYPTO_HW=y \ + CONFIG_CRYPTO_DEV_AL_CRYPTO \ + CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS \ + CONFIG_CRYPTO_DEV_AL_AHASH_CRC + FILES:=$(LINUX_DIR)/drivers/crypto/al/al_crypto.ko + AUTOLOAD:=$(call AutoLoad,09,al_crypto) + $(call AddDepends/crypto) +endef + +$(eval $(call KernelPackage,crypto-hw-alpine)) + + define KernelPackage/crypto-hw-ccp + TITLE:=AMD Cryptographic Coprocessor + DEPENDS:= \ diff --git a/package/kernel/linux/modules/netdevices.mk b/package/kernel/linux/modules/netdevices.mk index 1470c5c1761046..b97136e97af754 --- a/package/kernel/linux/modules/netdevices.mk +++ b/package/kernel/linux/modules/netdevices.mk @@ -1964,3 +1964,37 @@ define KernelPackage/amazon-ena/description endef $(eval $(call KernelPackage,amazon-ena)) + +define KernelPackage/mdio-al-gpio + SUBMENU:=$(NETWORK_DEVICES_MENU) + TITLE:=Alpine MDIO GPIO bus controller + DEPENDS:=@TARGET_alpine +kmod-mdio + KCONFIG:=CONFIG_MDIO_AL_GPIO + FILES:=$(LINUX_DIR)/drivers/net/mdio/mdio-al-gpio.ko + AUTOLOAD:=$(call AutoLoad,11,mdio-al-gpio,1) +endef + +define KernelPackage/mdio-al-gpio/description + Supports the Alpine MDIO GPIO bus controller +endef + +$(eval $(call KernelPackage,mdio-al-gpio)) + +define KernelPackage/al-eth + SUBMENU:=$(NETWORK_DEVICES_MENU) + TITLE:=Annapurna Labs unified 1G/10G Ethernet driver + DEPENDS:=@PCI_SUPPORT @TARGET_alpine +kmod-mdio + KCONFIG:= \ + CONFIG_NET_AL_ETH \ + CONFIG_AL_ETH_ALLOC_FRAG=y \ + CONFIG_AL_ETH_ALLOC_PAGE=n \ + CONFIG_AL_ETH_ALLOC_SKB=n + FILES:=$(LINUX_DIR)/drivers/net/ethernet/al/al_eth_drv.ko + AUTOLOAD:=$(call AutoLoad,10,al-eth-drv,1) +endef + +define KernelPackage/al-eth/description + Kernel modules for Annapurna Labs unified 1G/10G Ethernet driver +endef + +$(eval $(call KernelPackage,al-eth)) diff --git a/target/linux/alpine/Makefile b/target/linux/alpine/Makefile new file mode 100644 index 00000000000000..8e5d255beed5ac --- /dev/null +++ b/target/linux/alpine/Makefile @@ -0,0 +1,29 @@ +# Copyright (c) 2013 The Linux Foundation. All rights reserved.
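+# +# Build target for the Annapurna Labs Alpine (quad-core Cortex-A15) SoC +# family, used here for the Netgear R9000 and XR700.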
+# +include $(TOPDIR)/rules.mk + +ARCH:=arm +BOARD:=alpine +BOARDNAME:=Annapurna Labs Alpine +FEATURES:=squashfs nand fpu ramdisk usb pci pcie +CPU_TYPE:=cortex-a15 +CPU_SUBTYPE:=neon-vfpv4 +SUBTARGETS:=generic + +KERNEL_PATCHVER:=6.6 + +KERNELNAME:=zImage Image dtbs + +include $(INCLUDE_DIR)/target.mk + +DEFAULT_PACKAGES += \ + kmod-leds-gpio kmod-gpio-button-hotplug swconfig \ + kmod-ata-ahci kmod-ata-ahci-platform \ + kmod-usb-ohci kmod-usb-ohci-pci kmod-usb2 kmod-usb2-pci kmod-usb3 kmod-usb-ledtrig-usbport \ + kmod-ath10k-ct ath10k-firmware-qca9984-ct kmod-wil6210 \ + kmod-usb-storage kmod-scsi-core kmod-fs-ext4 kmod-fs-btrfs kmod-fs-vfat kmod-fs-msdos block-mount \ + kmod-al-eth kmod-mdio-al-gpio \ + cfdisk e2fsprogs badblocks partx-utils \ + wpad-openssl uboot-envtools + +$(eval $(call BuildTarget)) diff --git a/target/linux/alpine/base-files/etc/board.d/02_network b/target/linux/alpine/base-files/etc/board.d/02_network new file mode 100644 index 00000000000000..7aeb4b752a2902 --- /dev/null +++ b/target/linux/alpine/base-files/etc/board.d/02_network @@ -0,0 +1,47 @@ +# +# Copyright (c) 2015 The Linux Foundation. All rights reserved. +# Copyright (c) 2011-2015 OpenWrt.org +# + +. /lib/functions/uci-defaults.sh +. /lib/functions/system.sh + +board_config_update + +board=$(board_name) + +# +# Switch configuration +# +# There are two switches: master (switch0) and slave (switch1). +# +# Ports P0 and P5 of the master switch are connected to the CPU ports +# ETH1 and ETH2, respectively. ETH1 shall be used for LAN and ETH2 for WAN. +# +# Port P3 of the master switch is the WAN port. VLAN2 is set up +# to isolate P3 and P5 of the master switch from LAN ports of +# both the master and the slave switch. +# +# All ports of the master switch except P3 and P5 are assigned to VLAN1. +# +# All ports of the slave switch are assigned to VLAN1. Port P6 of the slave +# switch is unused and idle. +# + +case "$board" in +netgear,r9000|\ +netgear,xr700) + ucidef_set_interfaces_lan_wan "eth1.1" "eth2.2" + ucidef_add_switch "switch0" \ + "0t@eth1" "1:lan" "2:lan" "4:lan" "6:lan" "3:wan" "5t@eth2" + ucidef_add_switch "switch1" \ + "6t@eth1" "0:lan" "1:lan" "2:lan" "3:lan" "4:lan" "5:lan" + ;; +*) + echo "Unsupported hardware. 
Network interfaces not initialized" + ;; +esac + +board_config_flush + +exit 0 diff --git a/target/linux/alpine/base-files/etc/uci-defaults/xx_customizations b/target/linux/alpine/base-files/etc/uci-defaults/xx_customizations new file mode 100644 index 00000000000000..8b5d8f3aaa52cf --- /dev/null +++ b/target/linux/alpine/base-files/etc/uci-defaults/xx_customizations @@ -0,0 +1,54 @@ + +wifi_key=$(uci -q get wireless.@wifi-iface[1].key) +if [ -z "$wifi_key" ]; then + uci set wireless.@wifi-iface[1].ssid='home_network' + uci set wireless.@wifi-iface[1].encryption=psk2 + uci set wireless.@wifi-iface[1].key='secret' + + uci set wireless.@wifi-device[1].country=US + uci set wireless.@wifi-device[1].channel=auto + uci set wireless.@wifi-device[1].disabled='0' + + uci commit wireless +fi + +exit 0 + +uci set system.@system[0].hostname='OpenWrt-dev' +uci commit system + +# +# IP address +# + +uci set network.lan.ipaddr=192.168.2.1 +uci commit network + +# +# SSH +# + +if [ "$(uci -q get dropbear.@dropbear[0].Port)" = "22" ]; then + uci set dropbear.@dropbear[0].Port='2222' + uci commit dropbear + service dropbear restart + + sed -i -e '/ListenAddress/d' /etc/ssh/sshd_config + sed -i -e '/PasswordAuthentication/d' /etc/ssh/sshd_config + sed -i -e '/PermitEmptyPasswords/d' /etc/ssh/sshd_config + sed -i -e '/PermitRootLogin/d' /etc/ssh/sshd_config + sed -i -e '/UsePAM/d' /etc/ssh/sshd_config + + cat >>/etc/ssh/sshd_config <<EOF +EOF +fi + +exit 0 diff --git a/target/linux/alpine/files/arch/arm/boot/dts/alpine-netgear.dtsi b/target/linux/alpine/files/arch/arm/boot/dts/alpine-netgear.dtsi new file mode 100644 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/boot/dts/alpine-netgear.dtsi +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> + +/memreserve/ 0x0000000000000000 0x0000000000100000; + +/ { + #address-cells = <0x2>; + #size-cells = <0x2>; + clock-ranges; + version = "2.5"; + model = "Annapurna Labs Alpine Dev Board"; + + chosen { + bootargs = "console=ttyS0,115200 pci=pcie_bus_perf ubi.mtd=ubi earlycon"; + stdout-path = &serial; + }; + + memory { + device_type = "memory"; + reg = <0x00000000 0x00000000 0x00000000 0x10000000 + 0x00000000 0x10000000 0x00000000 0x10000000 + 0x00000000 0x20000000 0x00000000 0x10000000 + 0x00000000 0x30000000 0x00000000 0x10000000>; + }; + + cpus { + #address-cells = <0x1>; + #size-cells = <0x0>; + + cpu@0 { + compatible = "arm,cortex-a15"; + device_type = "cpu"; + reg = <0x0>; + clocks = <0x1>; + clock-names = "cpu"; + clock-frequency = <0x6553f100>; + }; + + cpu@1 { + compatible = "arm,cortex-a15"; + device_type = "cpu"; + reg = <0x1>; + clocks = <0x1>; + clock-names = "cpu"; + clock-frequency = <0x6553f100>; + }; + + cpu@2 { + compatible = "arm,cortex-a15"; + device_type = "cpu"; + reg = <0x2>; + clocks = <0x1>; + clock-names = "cpu"; + clock-frequency = <0x6553f100>; + }; + + cpu@3 { + compatible = "arm,cortex-a15"; + device_type = "cpu"; + reg = <0x3>; + clocks = <0x1>; + clock-names = "cpu"; + clock-frequency = <0x6553f100>; + }; + }; + + soc { + #address-cells = <0x2>; + #size-cells = <0x2>; + compatible = "simple-bus"; + interrupt-parent = <0x2>; + ranges; + + arch-timer { + compatible = "arm,cortex-a15-timer", "arm,armv7-timer"; + interrupts = <0x1 0xd 0xf08 0x1 0xe 0xf08 0x1 0xb 0xf08 0x1 0xa 0xf08>; + clock-frequency = <0x2faf080>; + }; + + gic_main { + compatible = "arm,cortex-a15-gic"; + #interrupt-cells = <0x3>; + #size-cells = <0x0>; + #address-cells = <0x0>; + interrupt-controller; + reg = <0x0 0xfb001000 0x0 0x1000 0x0 0xfb002000 0x0 0x2000 0x0 0xfb004000 0x0 0x1000 0x0 0xfb006000 0x0 0x2000>; + interrupts = <0x1 0x9 0xf04>; + linux,phandle = <0x2>; + phandle = <0x2>; + }; + + cpu_resume { + compatible = "annapurna-labs,al-cpu-resume"; + reg = <0x0 0xfbff5ec0 0x0 0x30>; + }; + + ccu { + compatible = "annapurna-labs,al-ccu"; + reg = <0x0 0xfb090000 0x0 
0x10000>; + io_coherency = <0x1>; + }; + + nb_service { + compatible = "annapurna-labs,al-nb-service"; + reg = <0x0 0xfb070000 0x0 0x10000>; + interrupts = <0x0 0x40 0x4 0x0 0x41 0x4 0x0 0x42 0x4 0x0 0x43 0x4>; + dev_ord_relax = <0x0>; + }; + + pbs { + compatible = "annapurna-labs,al-pbs"; + reg = <0x0 0xfd8a8000 0x0 0x1000>; + }; + + msix { + compatible = "annapurna-labs,al-msix"; + reg = <0x0 0xfbe00000 0x0 0x100000>; + interrupts = <0x0 0x60 0x1 0x0 0x9f 0x1>; + }; + + pmu { + compatible = "arm,cortex-a15-pmu"; + interrupts = <0x0 0x44 0x4 0x0 0x45 0x4 0x0 0x46 0x4 0x0 0x47 0x4>; + }; + + timer0 { + compatible = "arm,sp804", "arm,primecell"; + reg = <0x0 0xfd890000 0x0 0x1000>; + interrupts = <0x0 0x9 0x4>; + clocks = <0x3>; + clock-names = "sbclk"; + status = "disabled"; + }; + + timer1 { + compatible = "arm,sp804", "arm,primecell"; + reg = <0x0 0xfd891000 0x0 0x1000>; + interrupts = <0x0 0xa 0x4>; + clocks = <0x3>; + clock-names = "sbclk"; + }; + + timer2 { + compatible = "arm,sp804", "arm,primecell"; + reg = <0x0 0xfd892000 0x0 0x1000>; + interrupts = <0x0 0xb 0x4>; + clocks = <0x3>; + clock-names = "sbclk"; + status = "disabled"; + }; + + timer3 { + compatible = "arm,sp804", "arm,primecell"; + reg = <0x0 0xfd893000 0x0 0x1000>; + interrupts = <0x0 0xc 0x4>; + clocks = <0x3>; + clock-names = "sbclk"; + status = "disabled"; + }; + + wdt0 { + compatible = "arm,sp805", "arm,primecell"; + reg = <0x0 0xfd88c000 0x0 0x1000>; + interrupts = <0x0 0xd 0x4>; + clocks = <0x3>; + clock-names = "apb_pclk"; + }; + + wdt1 { + compatible = "arm,sp805", "arm,primecell"; + reg = <0x0 0xfd88d000 0x0 0x1000>; + interrupts = <0x0 0xe 0x4>; + clocks = <0x3>; + clock-names = "apb_pclk"; + status = "disabled"; + }; + + wdt2 { + compatible = "arm,sp805", "arm,primecell"; + reg = <0x0 0xfd88e000 0x0 0x1000>; + interrupts = <0x0 0xf 0x4>; + clocks = <0x3>; + clock-names = "apb_pclk"; + status = "disabled"; + }; + + wdt3 { + compatible = "arm,sp805", "arm,primecell"; + reg = <0x0 0xfd88f000 0x0 0x1000>; + interrupts = <0x0 0x10 0x4>; + clocks = <0x3>; + clock-names = "apb_pclk"; + status = "disabled"; + }; + + i2c-pld { + #address-cells = <0x1>; + #size-cells = <0x0>; + compatible = "snps,designware-i2c"; + reg = <0x0 0xfd880000 0x0 0x1000>; + interrupts = <0x0 0x15 0x4>; + clocks = <0x3>; + clock-frequency = <0x61a80>; + + bq32k@68 { + compatible = "bq32000"; + reg = <0x68>; + }; + + i2c_expender@20 { + compatible = "pca9554"; + reg = <0x20>; + }; + + g761@3e { + compatible = "gmt,g761"; + reg = <0x3e>; + clocks = <0x4>; + fan_gear_mode = <0x0>; + fan_startv = <0x3>; + pwm_polarity = <0x0>; + }; + }; + + i2c-gen { + #address-cells = <0x1>; + #size-cells = <0x0>; + compatible = "snps,designware-i2c"; + reg = <0x0 0xfd894000 0x0 0x1000>; + interrupts = <0x0 0x8 0x4>; + clocks = <0x3>; + clock-frequency = <0x61a80>; + status = "disabled"; + }; + + gpio0: gpio0 { + #gpio-cells = <0x2>; + compatible = "arm,pl061", "arm,primecell"; + gpio-controller; + reg = <0x0 0xfd887000 0x0 0x1000>; + interrupts = <0x0 0x2 0x4>; + clocks = <0x3>; + clock-names = "apb_pclk"; + baseidx = <0x0>; + }; + + gpio1: gpio1 { + #gpio-cells = <0x2>; + compatible = "arm,pl061", "arm,primecell"; + gpio-controller; + reg = <0x0 0xfd888000 0x0 0x1000>; + interrupts = <0x0 0x3 0x4>; + clocks = <0x3>; + clock-names = "apb_pclk"; + baseidx = <0x8>; + }; + + gpio2: gpio2 { + #gpio-cells = <0x2>; + compatible = "arm,pl061", "arm,primecell"; + gpio-controller; + reg = <0x0 0xfd889000 0x0 0x1000>; + interrupts = <0x0 0x4 0x4>; + clocks = <0x3>; + 
clock-names = "apb_pclk"; + baseidx = <0x10>; + }; + + gpio3: gpio3 { + #gpio-cells = <0x2>; + compatible = "arm,pl061", "arm,primecell"; + gpio-controller; + reg = <0x0 0xfd88a000 0x0 0x1000>; + interrupts = <0x0 0x5 0x4>; + clocks = <0x3>; + clock-names = "apb_pclk"; + baseidx = <0x18>; + }; + + gpio4: gpio4 { + #gpio-cells = <0x2>; + compatible = "arm,pl061", "arm,primecell"; + gpio-controller; + reg = <0x0 0xfd88b000 0x0 0x1000>; + interrupts = <0x0 0x6 0x4>; + clocks = <0x3>; + clock-names = "apb_pclk"; + baseidx = <0x20>; + }; + + gpio5: gpio5 { + #gpio-cells = <0x2>; + compatible = "arm,pl061", "arm,primecell"; + gpio-controller; + reg = <0x0 0xfd897000 0x0 0x1000>; + interrupts = <0x0 0x7 0x4>; + clocks = <0x3>; + clock-names = "apb_pclk"; + baseidx = <0x28>; + }; + + serial: uart0 { + compatible = "ns16550a"; + reg = <0x0 0xfd883000 0x0 0x1000>; + clock-frequency = <0x165a0bc0>; + interrupts = <0x0 0x11 0x4>; + reg-shift = <0x2>; + reg-io-width = <0x4>; + }; + + uart1 { + compatible = "ns16550a"; + reg = <0x0 0xfd884000 0x0 0x1000>; + clock-frequency = <0x165a0bc0>; + interrupts = <0x0 0x12 0x4>; + reg-shift = <0x2>; + reg-io-width = <0x4>; + }; + + uart2 { + compatible = "ns16550a"; + reg = <0x0 0xfd885000 0x0 0x1000>; + clock-frequency = <0x165a0bc0>; + interrupts = <0x0 0x13 0x4>; + reg-shift = <0x2>; + reg-io-width = <0x4>; + status = "disabled"; + }; + + uart3 { + compatible = "ns16550a"; + reg = <0x0 0xfd886000 0x0 0x1000>; + clock-frequency = <0x165a0bc0>; + interrupts = <0x0 0x14 0x4>; + reg-shift = <0x2>; + reg-io-width = <0x4>; + status = "disabled"; + }; + + pcie-internal { + compatible = "annapurna-labs,al-internal-pcie"; + device_type = "pci"; + #size-cells = <0x2>; + #address-cells = <0x3>; + #interrupt-cells = <0x1>; + interrupt-parent = <0x2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = <0x3000 0x0 0x0 0x1 0x2 0x0 0x20 0x4 0x3800 0x0 0x0 0x1 0x2 0x0 0x24 0x4 0x4000 0x0 0x0 0x1 0x2 0x0 0x2b 0x4 0x4800 0x0 0x0 0x1 0x2 0x0 0x2c 0x4>; + ranges = <0x0 0x0 0xfbc00000 0x0 0xfbc00000 0x0 0x100000 0x2000000 0x0 0xfe000000 0x0 0xfe000000 0x0 0x1000000>; + bus-range = <0x0 0x0>; + + eth1@0,1 { + compatible = "pci1c36,0001"; + device_type = "pci"; + reg = <0x00000800 0 0 0 0>; + + mdio0: mdio { + #address-cells = <1>; + #size-cells = <0>; + + status = "disabled"; + }; + }; + }; + + pciee0: pcie-external0 { + compatible = "annapurna-labs,al-pci"; + reg = <0x0 0xfd800000 0x0 0x20000>; + device_type = "pci"; + #size-cells = <0x2>; + #address-cells = <0x3>; + #interrupt-cells = <0x1>; + interrupt-parent = <0x2>; + interrupt-map-mask = <0x0 0x0 0x0 0x0>; + interrupt-map = <0x0 0x0 0x0 0x0 0x5 0x1f4>; + ranges = <0x0 0x0 0xfb600000 0x0 0xfb600000 0x0 0x200000 0x1000000 0x0 0x10000 0x0 0xe0000000 0x0 0x10000 0x2000000 0x0 0xe0010000 0x0 0xe0010000 0x0 0x7ff0000>; + bus-range = <0x0 0xff>; + + pcie-legacy-intc { + interrupt-controller; + #interrupt-cells = <0x1>; + interrupt-parent = <0x2>; + interrupts = <0x0 0x32 0x4>; + linux,phandle = <0x5>; + phandle = <0x5>; + }; + }; + + pcie-external1 { + compatible = "annapurna-labs,al-pci"; + reg = <0x0 0xfd820000 0x0 0x20000>; + device_type = "pci"; + #size-cells = <0x2>; + #address-cells = <0x3>; + #interrupt-cells = <0x1>; + interrupt-parent = <0x2>; + interrupt-map-mask = <0x0 0x0 0x0 0x0>; + interrupt-map = <0x0 0x0 0x0 0x0 0x6 0x1f5>; + ranges = <0x0 0x0 0xfb800000 0x0 0xfb800000 0x0 0x200000 0x1000000 0x0 0x20000 0x0 0xe8000000 0x0 0x10000 0x2000000 0x0 0xe8010000 0x0 0xe8010000 0x0 0x7ff0000>; + bus-range = <0x0 
0xff>; + + pcie-legacy-intc { + interrupt-controller; + #interrupt-cells = <0x1>; + interrupt-parent = <0x2>; + interrupts = <0x0 0x33 0x4>; + linux,phandle = <0x6>; + phandle = <0x6>; + }; + }; + + pcie-external2 { + compatible = "annapurna-labs,al-pci"; + reg = <0x0 0xfd840000 0x0 0x20000>; + device_type = "pci"; + #size-cells = <0x2>; + #address-cells = <0x3>; + #interrupt-cells = <0x1>; + interrupt-parent = <0x2>; + interrupt-map-mask = <0x0 0x0 0x0 0x0>; + interrupt-map = <0x0 0x0 0x0 0x0 0x7 0x1f6>; + ranges = <0x0 0x0 0xfba00000 0x0 0xfba00000 0x0 0x200000 0x1000000 0x0 0x30000 0x0 0xf0000000 0x0 0x10000 0x2000000 0x0 0xf0010000 0x0 0xf0010000 0x0 0x7ff0000>; + bus-range = <0x0 0xff>; + + pcie-legacy-intc { + interrupt-controller; + #interrupt-cells = <0x1>; + interrupt-parent = <0x2>; + interrupts = <0x0 0x34 0x4>; + linux,phandle = <0x7>; + phandle = <0x7>; + }; + }; + + tsens: thermal-sensor { + compatible = "annapurna-labs,al-thermal"; + reg = <0x0 0xfd860a00 0x0 0x100>; + #thermal-sensor-cells = <0>; + }; + + nand: nand-flash { + compatible = "annapurna-labs,al-nand"; + reg = <0x0 0xfa100000 0x0 0x202000>; + interrupts = <0x0 0x1 0x4>; + max-onfi-timing-mode = <0x1>; + + status = "disabled"; + }; + + spi { + compatible = "snps,dw-spi-mmio"; + #address-cells = <0x1>; + #size-cells = <0x0>; + reg = <0x0 0xfd882000 0x0 0x1000>; + interrupts = <0x0 0x17 0x4>; + num-chipselect = <0x4>; + bus-num = <0x0>; + clocks = <0x3>; + clock-names = "sbclk"; + }; + + clocks { + #address-cells = <0x1>; + #size-cells = <0x0>; + + refclk { + #clock-cells = <0x0>; + compatible = "fixed-clock"; + clock-frequency = <0x5f5e100>; + }; + + sbclk { + #clock-cells = <0x0>; + compatible = "fixed-clock"; + clock-frequency = <0x165a0bc0>; + linux,phandle = <0x3>; + phandle = <0x3>; + }; + + nbclk { + #clock-cells = <0x0>; + compatible = "fixed-clock"; + clock-frequency = <0x2faf0800>; + }; + + cpuclk { + #clock-cells = <0x0>; + compatible = "fixed-clock"; + clock-frequency = <0xf4240>; + linux,phandle = <0x1>; + phandle = <0x1>; + }; + + fixedclk { + #clock-cells = <0x0>; + compatible = "fixed-clock"; + clock-frequency = <0x3138>; + linux,phandle = <0x4>; + phandle = <0x4>; + }; + }; + + serdes { + compatible = "annapurna-labs,al-serdes"; + reg = <0x0 0xfd8c0000 0x0 0x1000>; + }; + + mc { + compatible = "annapurna-labs,al-mc"; + reg = <0x0 0xfb080000 0x0 0x10000>; + }; + + pinctrl { + compatible = "annapurna-labs,al-pinctrl"; + reg = <0x0 0xfd8a8000 0x0 0x1000>; + + if_nor_8 { + id = "if_nor_8"; + arg = <0x0>; + }; + + if_nor_16 { + id = "if_nor_16"; + arg = <0x0>; + }; + + if_nor_cs_0 { + id = "if_nor_cs_0"; + arg = <0x0>; + }; + + if_nor_cs_1 { + id = "if_nor_cs_1"; + arg = <0x0>; + }; + + if_nor_cs_2 { + id = "if_nor_cs_2"; + arg = <0x0>; + }; + + if_nor_cs_3 { + id = "if_nor_cs_3"; + arg = <0x0>; + }; + + if_nor_wp { + id = "if_nor_wp"; + arg = <0x0>; + }; + + if_nand_8 { + id = "if_nand_8"; + arg = <0x0>; + linux,phandle = <0x8>; + phandle = <0x8>; + }; + + if_nand_16 { + id = "if_nand_16"; + arg = <0x0>; + }; + + if_nand_cs_0 { + id = "if_nand_cs_0"; + arg = <0x0>; + linux,phandle = <0x9>; + phandle = <0x9>; + }; + + if_nand_cs_1 { + id = "if_nand_cs_1"; + arg = <0x0>; + }; + + if_nand_cs_2 { + id = "if_nand_cs_2"; + arg = <0x0>; + }; + + if_nand_cs_3 { + id = "if_nand_cs_3"; + arg = <0x0>; + }; + + if_nand_wp { + id = "if_nand_wp"; + arg = <0x0>; + }; + + if_sram_8 { + id = "if_sram_8"; + arg = <0x0>; + }; + + if_sram_16 { + id = "if_sram_16"; + arg = <0x0>; + }; + + if_sram_cs_0 { + id = 
"if_sram_cs_0"; + arg = <0x0>; + }; + + if_sram_cs_1 { + id = "if_sram_cs_1"; + arg = <0x0>; + }; + + if_sram_cs_2 { + id = "if_sram_cs_2"; + arg = <0x0>; + }; + + if_sram_cs_3 { + id = "if_sram_cs_3"; + arg = <0x0>; + }; + + if_sata_0_leds { + id = "if_sata_0_leds"; + arg = <0x0>; + }; + + if_sata_1_leds { + id = "if_sata_1_leds"; + arg = <0x0>; + }; + + if_eth_leds { + id = "if_eth_leds"; + arg = <0x0>; + }; + + if_eth_gpio { + id = "if_eth_gpio"; + arg = <0x0>; + }; + + if_uart_1 { + id = "if_uart_1"; + arg = <0x0>; + }; + + if_uart_1_modem { + id = "if_uart_1_modem"; + arg = <0x0>; + linux,phandle = <0xa>; + phandle = <0xa>; + }; + + if_uart_2 { + id = "if_uart_2"; + arg = <0x0>; + }; + + if_uart_3 { + id = "if_uart_3"; + arg = <0x0>; + }; + + if_i2c_gen { + id = "if_i2c_gen"; + arg = <0x0>; + }; + + if_ulpi_0_rst_n { + id = "if_ulpi_0_rst_n"; + arg = <0x0>; + }; + + if_ulpi_1_rst_n { + id = "if_ulpi_1_rst_n"; + arg = <0x0>; + }; + + if_pci_ep_int_a { + id = "if_pci_ep_int_a"; + arg = <0x0>; + }; + + if_pci_ep_reset_out { + id = "if_pci_ep_reset_out"; + arg = <0x0>; + }; + + if_spim_a_ss_1 { + id = "if_spim_a_ss_1"; + arg = <0x0>; + }; + + if_spim_a_ss_2 { + id = "if_spim_a_ss_2"; + arg = <0x0>; + }; + + if_spim_a_ss_3 { + id = "if_spim_a_ss_3"; + arg = <0x0>; + }; + + if_ulpi_1_b { + id = "if_ulpi_1_b"; + arg = <0x0>; + }; + + if_gpio0 { + id = "if_gpio"; + arg = <0x0>; + }; + + if_gpio1 { + id = "if_gpio"; + arg = <0x1>; + }; + + if_gpio2 { + id = "if_gpio"; + arg = <0x2>; + }; + + if_gpio3 { + id = "if_gpio"; + arg = <0x3>; + }; + + if_gpio4 { + id = "if_gpio"; + arg = <0x4>; + }; + + if_gpio5 { + id = "if_gpio"; + arg = <0x5>; + }; + + if_gpio6 { + id = "if_gpio"; + arg = <0x6>; + }; + + if_gpio7 { + id = "if_gpio"; + arg = <0x7>; + }; + + if_gpio8 { + id = "if_gpio"; + arg = <0x8>; + }; + + if_gpio9 { + id = "if_gpio"; + arg = <0x9>; + }; + + if_gpio10 { + id = "if_gpio"; + arg = <0xa>; + }; + + if_gpio11 { + id = "if_gpio"; + arg = <0xb>; + }; + + if_gpio12 { + id = "if_gpio"; + arg = <0xc>; + }; + + if_gpio13 { + id = "if_gpio"; + arg = <0xd>; + }; + + if_gpio14 { + id = "if_gpio"; + arg = <0xe>; + }; + + if_gpio15 { + id = "if_gpio"; + arg = <0xf>; + }; + + if_gpio16 { + id = "if_gpio"; + arg = <0x10>; + }; + + if_gpio17 { + id = "if_gpio"; + arg = <0x11>; + }; + + if_gpio18 { + id = "if_gpio"; + arg = <0x12>; + }; + + if_gpio19 { + id = "if_gpio"; + arg = <0x13>; + }; + + if_gpio20 { + id = "if_gpio"; + arg = <0x14>; + }; + + if_gpio21 { + id = "if_gpio"; + arg = <0x15>; + }; + + if_gpio22 { + id = "if_gpio"; + arg = <0x16>; + }; + + if_gpio23 { + id = "if_gpio"; + arg = <0x17>; + }; + + if_gpio24 { + id = "if_gpio"; + arg = <0x18>; + }; + + if_gpio25 { + id = "if_gpio"; + arg = <0x19>; + }; + + if_gpio26 { + id = "if_gpio"; + arg = <0x1a>; + }; + + if_gpio27 { + id = "if_gpio"; + arg = <0x1b>; + }; + + if_gpio28 { + id = "if_gpio"; + arg = <0x1c>; + }; + + if_gpio29 { + id = "if_gpio"; + arg = <0x1d>; + }; + + if_gpio30 { + id = "if_gpio"; + arg = <0x1e>; + }; + + if_gpio31 { + id = "if_gpio"; + arg = <0x1f>; + }; + + if_gpio32 { + id = "if_gpio"; + arg = <0x20>; + }; + + if_gpio33 { + id = "if_gpio"; + arg = <0x21>; + }; + + if_gpio34 { + id = "if_gpio"; + arg = <0x22>; + }; + + if_gpio35 { + id = "if_gpio"; + arg = <0x23>; + }; + + if_gpio36 { + id = "if_gpio"; + arg = <0x24>; + }; + + if_gpio37 { + id = "if_gpio"; + arg = <0x25>; + }; + + if_gpio38 { + id = "if_gpio"; + arg = <0x26>; + }; + + if_gpio39 { + id = "if_gpio"; + arg = <0x27>; + }; + + if_gpio40 { + id = 
"if_gpio"; + arg = <0x28>; + }; + + if_gpio41 { + id = "if_gpio"; + arg = <0x29>; + }; + + if_gpio42 { + id = "if_gpio"; + arg = <0x2a>; + }; + + if_gpio43 { + id = "if_gpio"; + arg = <0x2b>; + }; + }; + + mdio1: mdio-al-gpio { + compatible = "annapurna-labs,mdio-al-gpio"; + #address-cells = <1>; + #size-cells = <0>; + + status = "disabled"; + }; + }; + + aliases { + led-boot = &led_system; + led-failsafe = &led_system; + led-running = &led_system; + led-upgrade = &led_system; + }; + + leds: leds { + compatible = "gpio-leds"; + + led_system: system { + label = "white:system"; + gpios = <&gpio2 0x6 0x0>; + default-state = "on"; + }; + + wan { + label = "white:wan"; + gpios = <&gpio2 0x7 0x0>; + }; + + guest { + label = "white:guest"; + gpios = <&gpio4 0x3 0x1>; + }; + + usb1 { + label = "white:usb1"; + gpios = <&gpio4 0x4 0x0>; + }; + + usb2 { + label = "white:usb2"; + gpios = <&gpio4 0x5 0x0>; + }; + + sfp { + label = "white:sfp"; + gpios = <&gpio3 0x6 0x0>; + }; + + radio { + label = "white:radio"; + gpios = <&gpio3 0x5 0x0>; + }; + + wps { + label = "white:wps"; + gpios = <&gpio4 0x7 0x0>; + }; + }; + + keys: keys { + compatible = "gpio-keys"; + + reset { + label = "Reset button"; + gpios = <&gpio3 0x7 0x1>; + linux,code = ; + }; + + rfkill { + label = "Toggle radio button"; + gpios = <&gpio0 0x5 0x1>; + linux,code = ; + }; + + button_wps { + label = "WPS button"; + gpios = <&gpio4 0x0 0x1>; + linux,code = ; + }; + }; + + thermal-zones { + cpu-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + thermal-sensors = <&tsens>; + + trips { + cpu_crit: cpu-crit { + temperature = <95000>; + hysteresis = <2000>; + type = "critical"; + }; + + cpu_alert: cpu-alert { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + }; + }; + }; +}; + +/* + * Switch configuration + * + * There are two switches: master and slave. + * + * Ports P0 and P5 of the master switch are connected to the CPU ports + * ETH1 and ETH2, respectively. u-boot sets up trunking for master's + * ports P0 and P5 (TRUNK0). We need to revert this because ETH1 shall + * be used for LAN and ETH2 for WAN. + * + * Ports P4 and P6 of the master switch are connected with ports P5 and P0 + * of the slave switch, respectively. These ports are set up into trunks: + * - P4 and P6 into TRUNK1 on the master switch + * - P5 and P0 into TRUNK0 on the slave switch + * + * Master switch: + * - P0: CPU ETH1 + * - P1: LAN2 + * - P2: LAN1 + * - P3: WAN + * - P4: connected to slave's P5 + * - P5: CPU ETH2 + * - P6: connected to slave's P0 + * + * Slave switch: + * - P0: connected to master's P6 + * - P1: LAN6 + * - P2: LAN5 + * - P3: LAN4 + * - P4: LAN3 + * - P5: connected to master's P4 + * - P6: - + * + * Trunk means Link Aggregation (LAG). A switch supports up to 4 trunks. 
+ * + * References: + * - https://raw.githubusercontent.com/Deoptim/atheros/master/QCA8337-datasheet.pdf + * - https://kb.netgear.com/2649/NETGEAR-Open-Source-Code-for-Programmers-GPL + * - package/base-files/files/lib/cfgmgr/enet.sh + * - git_home/u-boot.git/board/annapurna-labs/alpine_hw29765235p0p512p1024p4x4p4x4_32_db/qca8337.c + * - https://elixir.bootlin.com/linux/v5.17.3/source/drivers/net/dsa/qca8k.c + * - https://lkml.kernel.org/netdev/20201202091356.24075-3-tobias@waldekranz.com/ + * - https://forum.openwrt.org/t/netgear-x10-r9000/3913/522?u=egorenar + */ + +&mdio0 { + status = "okay"; + + ethernet-phy@0 { + device_type = "ethernet-phy"; + reg = <0>; + qca,mib-poll-interval = <500>; + + qca,ar8327-initvals = < + 0x00004 0x7680000 /* PORT0_PAD_CTRL */ + 0x00008 0x7600000 /* PORT5_PAD_CTRL */ + 0x0000c 0x80 /* PORT6_PAD_CTRL */ + 0x000e0 0xc74164de /* PAD_SGMII_CTRL*/ + 0x000e4 0xea545 /* PAD_MAC_PWR_SEL */ + 0x00010 0x2613a0 /* POS */ + 0x0007c 0x4e /* PORT0_STATUS */ + 0x0008c 0x7e /* PORT4_STATUS */ + 0x00090 0x4e /* PORT5_STATUS */ + 0x00094 0x7e /* PORT6_STATUS */ + /* Trunk 1: P4 + P6 */ + 0x00700 0xd000 /* GOL_TRUNK_CTRL0 */ + 0x00704 0xec0000 /* GOL_TRUNK_CTRL1 */ + 0x00808 0x7f004e /* QM_CTRL_REG */ + >; + }; +}; + +&mdio1 { + status = "okay"; + + ethernet-phy@0 { + device_type = "ethernet-phy"; + reg = <0>; + qca,mib-poll-interval = <500>; + + qca,ar8327-initvals = < + 0x00004 0x80080 /* PORT0_PAD_CTRL */ + 0x000e0 0xc74164de /* PAD_SGMII_CTRL */ + 0x000e4 0xaa545 /* PAD_MAC_PWR_SEL */ + 0x00010 0x2613a0 /* POS */ + 0x0007c 0x7e /* PORT0_STATUS */ + 0x00090 0x7e /* PORT5_STATUS */ + /* Trunk 0: P0 + P5 */ + 0x00700 0xa1 /* GOL_TRUNK_CTRL0 */ + 0x00704 0xd8 /* GOL_TRUNK_CTRL1 */ + 0x00808 0x7f004e /* QM_CTRL_REG */ + >; + }; +}; + +&nand { + status = "okay"; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + u_boot { + label = "u-boot"; + reg = <0x00000000 0x001e0000>; + read-only; + }; + + u_boot_env { + label = "u-boot-env"; + reg = <0x001e0000 0x00120000>; + }; + + ART { + label = "ART"; + reg = <0x00300000 0x00140000>; + read-only; + + nvmem-layout { + compatible = "fixed-layout"; + #address-cells = <1>; + #size-cells = <1>; + + macaddr_art_0: macaddr@0 { + reg = <0x0 0x6>; + }; + + macaddr_art_6: macaddr@6 { + reg = <0x6 0x6>; + }; + + macaddr_art_c: macaddr@c { + compatible = "mac-base"; + reg = <0xc 0x6>; + #nvmem-cell-cells = <1>; + }; + + precal_art_1000: precal@1000 { + reg = <0x1000 0x2f20>; + }; + + precal_art_5000: precal@5000 { + reg = <0x5000 0x2f20>; + }; + }; + }; + + ART_bak { + label = "ART.bak"; + reg = <0x00440000 0x00140000>; + read-only; + }; + + kernel { + label = "kernel"; + reg = <0x00580000 0x02000000>; + }; + + ubi { + label = "ubi"; + reg = <0x02580000 0x1ca80000>; + }; + + recovery { + label = "recovery"; + reg = <0x1f000000 0x01000000>; + }; + }; +}; + +&pciee0 { + /* + * relevant lspci topology + * + * -[0001:00]---00.0-[01-04]----00.0-[02-04]--+-03.0-[03]----00.0 + * \-07.0-[04]----00.0 + */ + + bridge@0,0 { + device_type = "pci"; + reg = <0 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + ranges; + + bridge@1,0 { + device_type = "pci"; + reg = <0 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + ranges; + + bridge@2,3 { + device_type = "pci"; + reg = <0x1800 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + ranges; + + wifi5g: wifi@3,0 { + compatible = "pci168c,0046"; + reg = <0 0 0 0 0>; + + nvmem-cells = <&macaddr_art_c 1>, <&precal_art_1000>; + nvmem-cell-names = 
"mac-address", "pre-calibration"; + }; + }; + + bridge@2,7 { + device_type = "pci"; + reg = <0x3800 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + ranges; + + wifi2g: wifi@4,0 { + compatible = "pci168c,0046"; + reg = <0 0 0 0 0>; + + nvmem-cells = <&macaddr_art_c 2>, <&precal_art_5000>; + nvmem-cell-names = "mac-address", "pre-calibration"; + }; + }; + }; + }; +}; diff --git a/target/linux/alpine/files/arch/arm/boot/dts/alpine-r9000.dts b/target/linux/alpine/files/arch/arm/boot/dts/alpine-r9000.dts new file mode 100644 index 00000000000000..46fe0b36d2307e --- /dev/null +++ b/target/linux/alpine/files/arch/arm/boot/dts/alpine-r9000.dts @@ -0,0 +1,6 @@ + +#include "alpine-netgear.dtsi" + +/ { + compatible = "netgear,r9000", "annapurna-labs,alpine"; +}; diff --git a/target/linux/alpine/files/arch/arm/boot/dts/alpine-xr700.dts b/target/linux/alpine/files/arch/arm/boot/dts/alpine-xr700.dts new file mode 100644 index 00000000000000..3383d2bb7dc0c8 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/boot/dts/alpine-xr700.dts @@ -0,0 +1,6 @@ + +#include "alpine-netgear.dtsi" + +/ { + compatible = "netgear,xr700", "netgear,r9000", "annapurna-labs,alpine"; +}; diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/Makefile b/target/linux/alpine/files/arch/arm/mach-alpine/Makefile new file mode 100644 index 00000000000000..253530afae581a --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/Makefile @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0-only + +ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include + +obj-y += alpine_machine.o +obj-y += al_hal_serdes.o +obj-y += al_fabric.o +obj-y += al_init_sys_fabric.o +obj-y += al_hal_iofic.o +obj-y += al_hal_m2m_udma.o +obj-y += al_hal_ssm.o +obj-y += al_hal_udma_main.o +obj-y += al_hal_udma_iofic.o +obj-y += al_hal_udma_fast.o +obj-y += al_hal_udma_config.o +obj-y += al_hal_udma_debug.o +obj-y += al_hal_exports.o +obj-$(CONFIG_SMP) += platsmp.o alpine_cpu_pm.o +obj-$(CONFIG_PCI) += pcie_of.o +obj-$(CONFIG_PCI) += al_hal_pcie.o +obj-$(CONFIG_PCI) += al_hal_pcie_interrupts.o +obj-$(CONFIG_PCI_MSI) += irq-alpine-msi.o +obj-$(CONFIG_HOTPLUG_CPU) += sleep-alpine.o +ifneq ($(CONFIG_HOTPLUG_CPU), y) +obj-$(CONFIG_CPU_IDLE_ALPINE) += sleep-alpine.o +endif + diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/Makefile.boot b/target/linux/alpine/files/arch/arm/mach-alpine/Makefile.boot new file mode 100644 index 00000000000000..67039c3e0c48fa --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/Makefile.boot @@ -0,0 +1,3 @@ + zreladdr-y := 0x00008000 +params_phys-y := 0x00000100 +initrd_phys-y := 0x00800000 diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_fabric.c b/target/linux/alpine/files/arch/arm/mach-alpine/al_fabric.c new file mode 100644 index 00000000000000..e74a20a8e8be89 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_fabric.c @@ -0,0 +1,298 @@ +/* + * Annapurna labs fabric. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "al_hal_nb_regs.h" +#include "al_init_sys_fabric.h" + +static struct of_device_id of_fabric_table[] = { + {.compatible = "annapurna-labs,al-fabric"}, + { /* end of list */ }, +}; + +static struct of_device_id of_nb_table[] = { + {.compatible = "annapurna-labs,al-nb-service"}, + { /* end of list */ }, +}; + +static struct of_device_id of_ccu_table[] = { + {.compatible = "annapurna-labs,al-ccu"}, + { /* end of list */ }, +}; + +struct sys_fabric_irq_struct { + unsigned int idx; + void __iomem *regs_base; + unsigned int irq_cause_base; + struct irq_chip_generic *irq_gc; +}; + +static struct sys_fabric_irq_struct sf_irq_arr[AL_FABRIC_INSTANCE_N]; + +int al_fabric_hwcc; + +static int al_fabric_plat_device_notifier(struct notifier_block *nb, + unsigned long event, void *__dev) +{ + struct device *dev = __dev; + + if (event != BUS_NOTIFY_ADD_DEVICE) + return NOTIFY_DONE; + +#ifdef CONFIG_ARM_HWCC_FLAG + dev->archdata.hwcc = al_fabric_hwcc; +#endif + dma_set_coherent_mask(dev, PHYS_MASK); + + if (!al_fabric_hwcc) + return NOTIFY_OK; + + dev->dma_coherent = true; + + return NOTIFY_OK; +} + +static struct notifier_block al_fabric_plat_device_nb = { + .notifier_call = al_fabric_plat_device_notifier, +}; + + +static int al_fabric_pci_device_notifier(struct notifier_block *nb, + unsigned long event, void *__dev) +{ + struct device *dev = __dev; + struct pci_dev *pdev = to_pci_dev(dev); + u32 temp; + + if (event != BUS_NOTIFY_BIND_DRIVER) + return NOTIFY_DONE; + +#ifdef CONFIG_ARM_HWCC_FLAG + dev->archdata.hwcc = al_fabric_hwcc; +#endif + dma_set_coherent_mask(dev, PHYS_MASK); + + if (!al_fabric_hwcc) + return NOTIFY_OK; + + dev->dma_coherent = true; + + /* Force the PCIE adapter to set AXI attr to match CC*/ + if(pci_domain_nr(pdev->bus) == 0) { + pci_read_config_dword(pdev, 0x110 ,&temp); + temp |= 0x3; + pci_write_config_dword(pdev, 0x110 ,temp); + /* Enable cache coherency for VF's (except USB and SATA) */ + if (PCI_SLOT(pdev->devfn) < 6) { + pci_write_config_dword(pdev, 0x130 ,temp); + pci_write_config_dword(pdev, 0x150 ,temp); + pci_write_config_dword(pdev, 0x170 ,temp); + } + + pci_read_config_dword(pdev, 0x220 ,&temp); + temp &= ~0xffff; + temp |= 0x3ff; + pci_write_config_dword(pdev, 0x220 ,temp); + } + + return NOTIFY_OK; +} + +static struct notifier_block al_fabric_pci_device_nb = { + .notifier_call = al_fabric_pci_device_notifier, +}; + +static void sf_irq_handler(struct irq_desc *desc) +{ + unsigned long pending, mask; + int offset; + struct sys_fabric_irq_struct *chip = irq_desc_get_handler_data(desc); + struct irq_chip *irqchip = irq_desc_get_chip(desc); + struct al_nb_regs *nb_regs = chip->regs_base; + + chained_irq_enter(irqchip, desc); + + mask = chip->irq_gc->mask_cache; + pending = readl(&nb_regs->global.nb_int_cause) & mask; + + /* deassert pending edge-triggered irqs */ + writel(~(pending & ~NB_GLOBAL_NB_INT_CAUSE_LEVEL_IRQ_MASK), + &nb_regs->global.nb_int_cause); + + /* handle pending irqs */ + if (likely(pending)) { + int fabric_irq_base = al_fabric_get_cause_irq(chip->idx, 0); + for_each_set_bit(offset, &pending, AL_FABRIC_IRQ_N) + generic_handle_irq(fabric_irq_base + offset); + } + + /* deassert pending level-triggered irqs 
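(cleared only after the handlers above have run, so a level source that is still asserted will latch its cause bit again)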
*/ + writel(~(pending & NB_GLOBAL_NB_INT_CAUSE_LEVEL_IRQ_MASK), + &nb_regs->global.nb_int_cause); + + chained_irq_exit(irqchip, desc); +} + +static void init_sf_irq_gc(struct sys_fabric_irq_struct *sfi) +{ + struct irq_chip_type *ct; + + sfi->irq_gc = irq_alloc_generic_chip("alpine_sf_irq", 1, + sfi->irq_cause_base, sfi->regs_base, handle_simple_irq); + sfi->irq_gc->private = sfi; + + ct = sfi->irq_gc->chip_types; + ct->chip.irq_mask = irq_gc_mask_clr_bit; + ct->chip.irq_unmask = irq_gc_mask_set_bit; + ct->regs.mask = offsetof(struct al_nb_regs, + cpun_config_status[sfi->idx].local_cause_mask); + + /* clear the no request field so irq can be requested */ + irq_setup_generic_chip(sfi->irq_gc, IRQ_MSK(AL_FABRIC_IRQ_N), + IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 0); +} + +static int init_sf_irq_struct(struct sys_fabric_irq_struct *sfi_arr, + unsigned int idx, void __iomem *regs_base) +{ + int ret; + + pr_debug("[%s] entered with idx = %d, regs_base = %p\n", + __func__, idx, regs_base); + sfi_arr[idx].idx = idx; + sfi_arr[idx].regs_base = regs_base; + /* allocate irq descriptors for the cause interrupts */ + ret = irq_alloc_descs(-1, 0, AL_FABRIC_IRQ_N, -1); + if (ret < 0) { + pr_err("[%s] Failed to allocate IRQ descriptors\n", __func__); + return ret; + } + sfi_arr[idx].irq_cause_base = ret; + init_sf_irq_gc(&sfi_arr[idx]); + return 0; +} + +int al_fabric_get_cause_irq(unsigned int idx, int irq) +{ + return sf_irq_arr[idx].irq_cause_base + irq; +} + +int al_fabric_hwcc_enabled(void) +{ + return al_fabric_hwcc; +} +EXPORT_SYMBOL(al_fabric_hwcc_enabled); + +int __init al_fabric_init(void) +{ + struct device_node *ccu_node; + struct device_node *nb_node; + void __iomem *nb_base_address; + void __iomem *ccu_address; + void __iomem *nb_service_base_address; + u32 prop; + bool dev_ord_relax; + int nb_serv_irq[AL_FABRIC_INSTANCE_N]; + int i, ret; + + pr_info("Initializing System Fabric\n"); + + nb_node = of_find_matching_node(NULL, of_nb_table); + ccu_node = of_find_matching_node(NULL, of_ccu_table); + + + if (!nb_node) + return -EINVAL; + + if (ccu_node) { + /* new devicetree */ + ccu_address = of_iomap(ccu_node, 0); + BUG_ON(!ccu_address); + + dev_ord_relax = + !of_property_read_u32(nb_node, "dev_ord_relax", &prop) + && prop; + } else { + /* old devicetree */ + ccu_node = of_find_matching_node(NULL, of_fabric_table); + if (!ccu_node) + return -EINVAL; + nb_base_address = of_iomap(ccu_node, 0); + BUG_ON(!nb_base_address); + ccu_address = nb_base_address + 0x90000; + + dev_ord_relax = + !of_property_read_u32(ccu_node, "dev_ord_relax", &prop) + && prop; + } + if (ccu_node && of_device_is_available(ccu_node)) { + al_fabric_hwcc = !of_property_read_u32(ccu_node, "io_coherency", &prop) + && prop; + + if (al_fabric_hwcc) + printk("Enabling IO Cache Coherency.\n"); + + al_ccu_init(ccu_address, al_fabric_hwcc); + + bus_register_notifier(&platform_bus_type, + &al_fabric_plat_device_nb); + bus_register_notifier(&pci_bus_type, + &al_fabric_pci_device_nb); + } + + if (nb_node) { + nb_service_base_address = of_iomap(nb_node, 0); + BUG_ON(!nb_service_base_address); + al_nbservice_init(nb_service_base_address, dev_ord_relax); + + for (i = 0 ; i < AL_FABRIC_INSTANCE_N ; ++i) { + ret = init_sf_irq_struct(sf_irq_arr, i, + nb_service_base_address); + if (ret < 0) { + pr_err("[%s] Failed to initialize sys-fabric " + "irq struct\n", __func__); + return ret; + } + nb_serv_irq[i] = irq_of_parse_and_map(nb_node, i); + irq_set_chained_handler(nb_serv_irq[i], sf_irq_handler); + ret = irq_set_handler_data(nb_serv_irq[i], + 
&sf_irq_arr[i]); + if (ret < 0) { + pr_err("[%s] Failed to set irq handler data\n" + , __func__); + return ret; + } + } + + } + + return 0; +} + diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_exports.c b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_exports.c new file mode 100644 index 00000000000000..f6f3ad18749b9c --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_exports.c @@ -0,0 +1,81 @@ +/* +* Copyright (C) 2015 Annapurna Labs Ltd. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; version 2 of the License. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +EXPORT_SYMBOL(al_iofic_moder_res_config); +EXPORT_SYMBOL(al_udma_q_handle_get); +EXPORT_SYMBOL(al_udma_m2s_packet_size_cfg_set); +EXPORT_SYMBOL(al_udma_q_init); +EXPORT_SYMBOL(al_iofic_read_cause); +EXPORT_SYMBOL(al_udma_cdesc_packet_get); +EXPORT_SYMBOL(al_iofic_msix_moder_interval_config); +EXPORT_SYMBOL(al_udma_iofic_config); +EXPORT_SYMBOL(al_udma_init); +EXPORT_SYMBOL(al_iofic_config); +EXPORT_SYMBOL(al_udma_states_name); +EXPORT_SYMBOL(al_udma_state_set); +EXPORT_SYMBOL(al_udma_iofic_unmask_offset_get); +EXPORT_SYMBOL(al_iofic_mask); +EXPORT_SYMBOL(al_iofic_unmask); +EXPORT_SYMBOL(al_iofic_clear_cause); +EXPORT_SYMBOL(al_udma_state_get); +EXPORT_SYMBOL(al_udma_q_struct_print); +EXPORT_SYMBOL(al_udma_regs_print); +EXPORT_SYMBOL(al_udma_ring_print); +EXPORT_SYMBOL(al_m2m_udma_handle_get); +EXPORT_SYMBOL(al_m2m_udma_state_set); +EXPORT_SYMBOL(al_m2m_udma_q_init); +EXPORT_SYMBOL(al_m2m_udma_init); +EXPORT_SYMBOL(al_serdes_tx_deemph_inc); +EXPORT_SYMBOL(al_serdes_signal_is_detected); +EXPORT_SYMBOL(al_serdes_rx_advanced_params_set); +EXPORT_SYMBOL(al_serdes_tx_advanced_params_set); +EXPORT_SYMBOL(al_serdes_eye_measure_run); +EXPORT_SYMBOL(al_udma_m2s_max_descs_set); +EXPORT_SYMBOL(al_serdes_tx_deemph_dec); +EXPORT_SYMBOL(al_serdes_handle_init); +EXPORT_SYMBOL(al_serdes_tx_deemph_preset); +EXPORT_SYMBOL(al_serdes_pma_hard_reset_lane); +EXPORT_SYMBOL(al_ssm_dma_init); +EXPORT_SYMBOL(al_ssm_dma_q_init); +EXPORT_SYMBOL(al_ssm_dma_state_set); +EXPORT_SYMBOL(al_ssm_dma_handle_get); +EXPORT_SYMBOL(al_ssm_dma_rx_queue_handle_get); +EXPORT_SYMBOL(al_ssm_dma_tx_queue_handle_get); +EXPORT_SYMBOL(al_udma_fast_memcpy_q_prepare); +EXPORT_SYMBOL(al_udma_q_enable); +EXPORT_SYMBOL(al_serdes_reg_read); +EXPORT_SYMBOL(al_udma_s2m_compl_desc_burst_config); +EXPORT_SYMBOL(al_udma_gen_vmid_conf_set); +EXPORT_SYMBOL(al_serdes_rx_equalization); +EXPORT_SYMBOL(al_serdes_tx_advanced_params_get); +EXPORT_SYMBOL(al_serdes_rx_advanced_params_get); +EXPORT_SYMBOL(al_udma_q_reset); +EXPORT_SYMBOL(al_serdes_reg_write); +EXPORT_SYMBOL(al_udma_s2m_max_descs_set); +EXPORT_SYMBOL(al_udma_s2m_q_compl_coal_config); diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_iofic.c b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_iofic.c new file mode 100644 index 
00000000000000..593b88cfe2f1c5 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_iofic.c @@ -0,0 +1,245 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 + +*******************************************************************************/ + +/** + * @{ + * @file al_hal_iofic.c + * + * @brief interrupt controller hal + * + */ + +#include +#include + +/* + * configure the interrupt registers; interrupts are kept masked + */ +int al_iofic_config(void __iomem *regs_base, int group, uint32_t flags) +{ + struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base); + + al_assert(regs_base); + al_assert(group < AL_IOFIC_MAX_GROUPS); + + al_reg_write32(&regs->ctrl[group].int_control_grp, flags); + + return 0; +} + +/* + * configure the moderation timer resolution for a given group + */ +int al_iofic_moder_res_config(void __iomem *regs_base, int group, + uint8_t resolution) +{ + struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base); + uint32_t reg; + + al_assert(regs_base); + al_assert(group < AL_IOFIC_MAX_GROUPS); + + reg = al_reg_read32(&regs->ctrl[group].int_control_grp); + AL_REG_FIELD_SET(reg, + INT_CONTROL_GRP_MOD_RES_MASK, + INT_CONTROL_GRP_MOD_RES_SHIFT, + resolution); + al_reg_write32(&regs->ctrl[group].int_control_grp, reg); + + return 0; +} + +/* + * configure the moderation timer interval for a given legacy interrupt group + */ +int al_iofic_legacy_moder_interval_config(void __iomem *regs_base, int group, + uint8_t interval) +{ + struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base); + uint32_t reg; + + al_assert(regs_base); + al_assert(group < AL_IOFIC_MAX_GROUPS); + + reg = al_reg_read32(&regs->ctrl[group].int_control_grp); + AL_REG_FIELD_SET(reg, + INT_CONTROL_GRP_MOD_INTV_MASK, + INT_CONTROL_GRP_MOD_INTV_SHIFT, + interval); + al_reg_write32(&regs->ctrl[group].int_control_grp, reg); + + return 0; +} + + +/* + * configure the moderation timer interval for a given msix vector. + */ +int al_iofic_msix_moder_interval_config(void __iomem *regs_base, int group, + uint8_t vector, uint8_t interval) +{ + struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base); + uint32_t reg; + + al_assert(regs_base); + al_assert(group < AL_IOFIC_MAX_GROUPS); + + reg = al_reg_read32(&regs->grp_int_mod[group][vector].grp_int_mod_reg); + AL_REG_FIELD_SET(reg, + INT_MOD_INTV_MASK, + INT_MOD_INTV_SHIFT, + interval); + al_reg_write32(&regs->grp_int_mod[group][vector].grp_int_mod_reg, reg); + + return 0; +} + +/* + * return the offset of the unmask register for a given group + */ +uint32_t __iomem * al_iofic_unmask_offset_get(void __iomem *regs_base, int group) +{ + struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base); + + al_assert(regs_base); + al_assert(group < AL_IOFIC_MAX_GROUPS); + + return &regs->ctrl[group].int_mask_clear_grp; +} + + +/* + * unmask specific interrupts for a given group + */ +void al_iofic_unmask(void __iomem *regs_base, int group, uint32_t mask) +{ + struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base); + + al_assert(regs_base); + al_assert(group < AL_IOFIC_MAX_GROUPS); + + /* + * use the mask clear register, no need to read the mask register + * itself. 
write 0 to unmask, 1 has no effect */ + al_reg_write32_relaxed(&regs->ctrl[group].int_mask_clear_grp, ~mask); +} + +/* + * mask specific interrupts for a given group + */ +void al_iofic_mask(void __iomem *regs_base, int group, uint32_t mask) +{ + struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base); + uint32_t reg; + + al_assert(regs_base); + al_assert(group < AL_IOFIC_MAX_GROUPS); + + reg = al_reg_read32(&regs->ctrl[group].int_mask_grp); + + al_reg_write32(&regs->ctrl[group].int_mask_grp, reg | mask); +} + +/* + * read the mask for a given group + */ +uint32_t al_iofic_read_mask(void __iomem *regs_base, int group) +{ + struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base); + + al_assert(regs_base); + al_assert(group < AL_IOFIC_MAX_GROUPS); + + return al_reg_read32(&regs->ctrl[group].int_mask_grp); +} + +/* + * read interrupt cause register for a given group + */ +uint32_t al_iofic_read_cause(void __iomem *regs_base, int group) +{ + struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base); + + al_assert(regs_base); + al_assert(group < AL_IOFIC_MAX_GROUPS); + + return al_reg_read32(&regs->ctrl[group].int_cause_grp); +} + +/* + * clear bits in the interrupt cause register for a given group + */ +void al_iofic_clear_cause(void __iomem *regs_base, int group, uint32_t mask) +{ + struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base); + + al_assert(regs_base); + al_assert(group < AL_IOFIC_MAX_GROUPS); + + /* inverse mask, writing 1 has no effect */ + al_reg_write32(&regs->ctrl[group].int_cause_grp, ~mask); +} + +/* + * Set the cause register for a given group + */ +void al_iofic_set_cause(void __iomem *regs_base, int group, uint32_t mask) +{ + struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base); + + al_assert(regs_base); + al_assert(group < AL_IOFIC_MAX_GROUPS); + + al_reg_write32(&regs->ctrl[group].int_cause_set_grp, mask); +} + + +/* + * unmask specific interrupts from aborting the udma for a given group + */ +void al_iofic_abort_mask(void __iomem *regs_base, int group, uint32_t mask) +{ + struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base); + + al_assert(regs_base); + al_assert(group < AL_IOFIC_MAX_GROUPS); + + al_reg_write32(&regs->ctrl[group].int_abort_msk_grp, mask); + +} + +/** @} end of interrupt controller group */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_m2m_udma.c b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_m2m_udma.c new file mode 100644 index 00000000000000..819c99fef95c93 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_m2m_udma.c @@ -0,0 +1,165 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
 + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @file al_hal_m2m_udma.c + * + * @brief HAL driver for DMA that is a compound of M2S and S2M UDMAs. + * + */ + +#include +#include + +/* + * initialize M2M UDMA + */ +int al_m2m_udma_init(struct al_m2m_udma *m2m_udma, + struct al_m2m_udma_params *params) +{ + struct al_udma_params dma_params; + int rc; + + al_dbg("raid [%s]: Initialize unit\n", params->name); + + m2m_udma->name = params->name; + m2m_udma->m2s_regs_base = params->m2s_regs_base; + m2m_udma->s2m_regs_base = params->s2m_regs_base; + m2m_udma->num_of_queues = params->num_of_queues; + + /* initialize tx udma component */ + dma_params.udma_reg = + (union udma_regs __iomem *)m2m_udma->m2s_regs_base; + dma_params.type = UDMA_TX; + dma_params.num_of_queues = m2m_udma->num_of_queues; + dma_params.name = "tx dma"; + + rc = al_udma_init(&m2m_udma->tx_udma, &dma_params); + if (rc != 0) { + al_err("failed to initialize %s, error %d\n", + dma_params.name, rc); + return rc; + } + + al_udma_m2s_max_descs_set(&m2m_udma->tx_udma, params->max_m2s_descs_per_pkt); + + /* initialize rx udma component */ + dma_params.udma_reg = + (union udma_regs __iomem *)m2m_udma->s2m_regs_base; + dma_params.type = UDMA_RX; + dma_params.num_of_queues = m2m_udma->num_of_queues; + dma_params.name = "rx dma"; + + rc = al_udma_init(&m2m_udma->rx_udma, &dma_params); + if (rc != 0) { + al_err("failed to initialize %s, error %d\n", + dma_params.name, rc); + return rc; + } + + al_udma_s2m_max_descs_set(&m2m_udma->rx_udma, params->max_s2m_descs_per_pkt); + + return 0; +} + +/* + * initialize the m2s(tx) and s2m(rx) udmas of the queue + */ +int al_m2m_udma_q_init(struct al_m2m_udma *m2m_udma, uint32_t qid, + struct al_udma_q_params *tx_params, + struct al_udma_q_params *rx_params) +{ + int rc; + + al_dbg("udma [%s]: Initialize queue %d\n", m2m_udma->name, + qid); + + rc = al_udma_q_init(&m2m_udma->tx_udma, qid, tx_params); + if (rc != 0) { + al_err("[%s]: failed to initialize tx q %d, error %d\n", + m2m_udma->name, qid, rc); + return rc; + } + + rc = al_udma_q_init(&m2m_udma->rx_udma, qid, rx_params); + if (rc != 0) { + al_err("[%s]: failed to initialize rx q %d, error %d\n", + m2m_udma->name, qid, rc); + return rc; + } + return 0; + +} + +/* + * Change the M2M UDMA state + */ +int al_m2m_udma_state_set(struct al_m2m_udma *m2m_udma, + enum al_udma_state udma_state) +{ + int rc; + + rc = al_udma_state_set(&m2m_udma->tx_udma, udma_state); + if (rc != 0) { + al_err("[%s]: failed to change 
state, error %d\n", + m2m_udma->name, rc); + return rc; + } + + rc = al_udma_state_set(&m2m_udma->rx_udma, udma_state); + if (rc != 0) { + al_err("[%s]: failed to change state, error %d\n", + m2m_udma->name, rc); + return rc; + } + + return 0; +} + +/* + * Get udma handle of the tx or rx udma, this handle can be used to call misc + * configuration functions defined at al_udma_config.h + */ +int al_m2m_udma_handle_get(struct al_m2m_udma *m2m_udma, + enum al_udma_type type, + struct al_udma **udma) +{ + if (type == UDMA_TX) + *udma = &m2m_udma->tx_udma; + else + *udma = &m2m_udma->rx_udma; + return 0; +} diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_nb_regs.h b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_nb_regs.h new file mode 100644 index 00000000000000..26ecb25748f780 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_nb_regs.h @@ -0,0 +1,1381 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + + +#ifndef __AL_HAL_NB_REGS_H__ +#define __AL_HAL_NB_REGS_H__ + +#ifdef __cplusplus +extern "C" { +#endif +/* +* Unit Registers +*/ + + + +struct al_nb_global { + /* [0x0] */ + uint32_t cpus_config; + /* [0x4] */ + uint32_t cpus_secure; + /* [0x8] Force init reset. */ + uint32_t cpus_init_control; + /* [0xc] Force init reset per decei mode. 
*/ + uint32_t cpus_init_status; + /* [0x10] */ + uint32_t nb_int_cause; + /* [0x14] */ + uint32_t sev_int_cause; + /* [0x18] */ + uint32_t pmus_int_cause; + /* [0x1c] */ + uint32_t sev_mask; + /* [0x20] */ + uint32_t cpus_hold_reset; + /* [0x24] */ + uint32_t cpus_software_reset; + /* [0x28] */ + uint32_t wd_timer0_reset; + /* [0x2c] */ + uint32_t wd_timer1_reset; + /* [0x30] */ + uint32_t wd_timer2_reset; + /* [0x34] */ + uint32_t wd_timer3_reset; + /* [0x38] */ + uint32_t ddrc_hold_reset; + /* [0x3c] */ + uint32_t fabric_software_reset; + /* [0x40] */ + uint32_t cpus_power_ctrl; + uint32_t rsrvd_0[7]; + /* [0x60] */ + uint32_t acf_base_high; + /* [0x64] */ + uint32_t acf_base_low; + /* [0x68] */ + uint32_t acf_control_override; + /* [0x6c] Read-only that reflects CPU Cluster Local GIC base ... */ + uint32_t lgic_base_high; + /* [0x70] Read-only that reflects CPU Cluster Local GIC base ... */ + uint32_t lgic_base_low; + /* [0x74] Read-only that reflects the device's IOGIC base hig ... */ + uint32_t iogic_base_high; + /* [0x78] Read-only that reflects IOGIC base low address */ + uint32_t iogic_base_low; + /* [0x7c] */ + uint32_t io_wr_split_control; + /* [0x80] */ + uint32_t io_rd_rob_control; + /* [0x84] */ + uint32_t sb_pos_error_log_1; + /* [0x88] */ + uint32_t sb_pos_error_log_0; + /* [0x8c] */ + uint32_t c2swb_config; + /* [0x90] */ + uint32_t msix_error_log; + /* [0x94] */ + uint32_t error_cause; + /* [0x98] */ + uint32_t error_mask; + uint32_t rsrvd_1; + /* [0xa0] */ + uint32_t qos_peak_control; + /* [0xa4] */ + uint32_t qos_set_control; + /* [0xa8] */ + uint32_t ddr_qos; + uint32_t rsrvd_2[9]; + /* [0xd0] */ + uint32_t acf_misc; + /* [0xd4] */ + uint32_t config_bus_control; + uint32_t rsrvd_3[10]; + /* [0x100] */ + uint32_t cpu_max_pd_timer; + /* [0x104] */ + uint32_t cpu_max_pu_timer; + uint32_t rsrvd_4[2]; + /* [0x110] */ + uint32_t auto_ddr_self_refresh_counter; + uint32_t rsrvd_5[3]; + /* [0x120] */ + uint32_t coresight_pd; + /* [0x124] */ + uint32_t coresight_internal_0; + /* [0x128] */ + uint32_t coresight_dbgromaddr; + /* [0x12c] */ + uint32_t coresight_dbgselfaddr; + /* [0x130] */ + uint32_t coresght_targetid; + /* [0x134] */ + uint32_t coresght_targetid0; + uint32_t rsrvd[946]; +}; +struct al_nb_system_counter { + /* [0x0] */ + uint32_t cnt_control; + /* [0x4] */ + uint32_t cnt_base_freq; + /* [0x8] */ + uint32_t cnt_low; + /* [0xc] */ + uint32_t cnt_high; + /* [0x10] */ + uint32_t cnt_init_low; + /* [0x14] */ + uint32_t cnt_init_high; + uint32_t rsrvd[58]; +}; +struct al_nb_rams_control_misc { + /* [0x0] */ + uint32_t ca15_rf_misc; + uint32_t rsrvd_0; + /* [0x8] */ + uint32_t nb_rf_misc; + uint32_t rsrvd[61]; +}; +struct al_nb_ca15_rams_control { + /* [0x0] */ + uint32_t rf_0; + /* [0x4] */ + uint32_t rf_1; + /* [0x8] */ + uint32_t rf_2; + uint32_t rsrvd; +}; +struct al_nb_semaphores { + /* [0x0] This configration is only sampled during reset of t ... 
*/ + uint32_t lockn; +}; +struct al_nb_debug { + /* [0x0] */ + uint32_t ca15_outputs_1; + /* [0x4] */ + uint32_t ca15_outputs_2; + uint32_t rsrvd_0[2]; + /* [0x10] */ + uint32_t cpu_msg[4]; + /* [0x20] */ + uint32_t rsv0_config; + /* [0x24] */ + uint32_t rsv1_config; + uint32_t rsrvd_1[2]; + /* [0x30] */ + uint32_t rsv0_status; + /* [0x34] */ + uint32_t rsv1_status; + uint32_t rsrvd_2[2]; + /* [0x40] */ + uint32_t ddrc; + /* [0x44] */ + uint32_t ddrc_phy_smode_control; + /* [0x48] */ + uint32_t ddrc_phy_smode_status; + uint32_t rsrvd_3[5]; + /* [0x60] */ + uint32_t pmc; + uint32_t rsrvd_4[3]; + /* [0x70] */ + uint32_t cpus_general; + uint32_t rsrvd_5[3]; + /* [0x80] */ + uint32_t cpus_int_out; + uint32_t rsrvd_6[31]; + /* [0x100] */ + uint32_t track_dump_ctrl; + /* [0x104] */ + uint32_t track_dump_rdata_0; + /* [0x108] */ + uint32_t track_dump_rdata_1; + uint32_t rsrvd_7[5]; + /* [0x120] */ + uint32_t track_events; + uint32_t rsrvd_8[3]; + /* [0x130] */ + uint32_t pos_track_dump_ctrl; + /* [0x134] */ + uint32_t pos_track_dump_rdata_0; + /* [0x138] */ + uint32_t pos_track_dump_rdata_1; + uint32_t rsrvd_9; + /* [0x140] */ + uint32_t c2swb_track_dump_ctrl; + /* [0x144] */ + uint32_t c2swb_track_dump_rdata_0; + /* [0x148] */ + uint32_t c2swb_track_dump_rdata_1; + uint32_t rsrvd_10[5]; + /* [0x160] */ + uint32_t c2swb_bar_ovrd_high; + /* [0x164] */ + uint32_t c2swb_bar_ovrd_low; + uint32_t rsrvd[38]; +}; +struct al_nb_cpun_config_status { + /* [0x0] This configration is only sampled during reset of t ... */ + uint32_t config; + uint32_t rsrvd_0; + /* [0x8] */ + uint32_t local_cause_mask; + uint32_t rsrvd_1; + /* [0x10] */ + uint32_t pmus_cause_mask; + uint32_t rsrvd_2[3]; + /* [0x20] Specifies the state of the CPU with reference to po ... */ + uint32_t power_ctrl; + /* [0x24] */ + uint32_t power_status; + /* [0x28] */ + uint32_t resume_addr_l; + /* [0x2c] */ + uint32_t resume_addr_h; + uint32_t rsrvd[52]; +}; +struct al_nb_mc_pmu { + /* [0x0] PMU Global Control Register */ + uint32_t pmu_control; + /* [0x4] PMU Global Control Register */ + uint32_t overflow; + uint32_t rsrvd[62]; +}; +struct al_nb_mc_pmu_counters { + /* [0x0] Counter Configuration Register */ + uint32_t cfg; + /* [0x4] Counter Control Register */ + uint32_t cntl; + /* [0x8] Counter Control Register */ + uint32_t low; + /* [0xc] Counter Control Register */ + uint32_t high; + uint32_t rsrvd[4]; +}; +struct al_nb_nb_version { + /* [0x0] Northbridge Revision */ + uint32_t version; + uint32_t rsrvd; +}; +struct al_nb_sriov { + /* [0x0] */ + uint32_t cpu_vmid[4]; + uint32_t rsrvd[4]; +}; +union al_nb_pcie_logging { + struct { + /* [0x0] */ + uint32_t control; + uint32_t rsrvd_0[3]; + /* [0x10] */ + uint32_t wr_window_low; + /* [0x14] */ + uint32_t wr_window_high; + /* [0x18] */ + uint32_t wr_window_size; + uint32_t rsrvd_1; + /* [0x20] */ + uint32_t fifo_base; + /* [0x24] */ + uint32_t fifo_size; + /* [0x28] */ + uint32_t fifo_head; + /* [0x2c] */ + uint32_t fifo_tail; + /* [0x30] */ + uint32_t wr_window_low_1; + /* [0x34] */ + uint32_t wr_window_high_1; + /* [0x38] */ + uint32_t wr_window_size_1; + uint32_t rsrvd_2; + /* [0x40] */ + uint32_t fifo_base_1; + /* [0x44] */ + uint32_t fifo_size_1; + /* [0x48] */ + uint32_t fifo_head_1; + /* [0x4c] */ + uint32_t fifo_tail_1; + /* [0x50] */ + uint32_t rd_window_low; + /* [0x54] */ + uint32_t rd_window_high; + /* [0x58] */ + uint32_t rd_window_size; + /* [0x5c] */ + uint32_t read_latch; + /* [0x60] */ + uint32_t rd_window_low_1; + /* [0x64] */ + uint32_t rd_window_high_1; + /* [0x68] */ + 
uint32_t rd_window_size_1; + /* [0x6c] */ + uint32_t read_latch_1; + /* [0x70] */ + uint32_t read_latch_timeout; + uint32_t rsrvd[35]; + } a0; + struct { + uint32_t control; + uint32_t read_latch; + uint32_t window_low; + uint32_t rsrvd_0; + uint32_t window_high; + uint32_t fifo_base; + uint32_t fifo_size; + uint32_t fifo_head; /* */ + uint32_t fifo_tail; + uint32_t rsrvd[55]; + } m0; +}; + +struct al_nb_regs { + struct al_nb_global global; /* [0x0] */ + struct al_nb_system_counter system_counter; /* [0x1000] */ + struct al_nb_rams_control_misc rams_control_misc; /* [0x1100] */ + struct al_nb_ca15_rams_control ca15_rams_control[5]; /* [0x1200] */ + uint32_t rsrvd_0[108]; + struct al_nb_semaphores semaphores[64]; /* [0x1400] */ + uint32_t rsrvd_1[320]; + struct al_nb_debug debug; /* [0x1a00] */ + uint32_t rsrvd_2[256]; + struct al_nb_cpun_config_status cpun_config_status[4]; /* [0x2000] */ + uint32_t rsrvd_3[1792]; + struct al_nb_mc_pmu mc_pmu; /* [0x4000] */ + struct al_nb_mc_pmu_counters mc_pmu_counters[4]; /* [0x4100] */ + uint32_t rsrvd_4[160]; + struct al_nb_nb_version nb_version; /* [0x4400] */ + uint32_t rsrvd_5[126]; + struct al_nb_sriov sriov; /* [0x4600] */ + uint32_t rsrvd_6[632]; + union al_nb_pcie_logging pcie_logging; /* [0x5000] */ +}; + + +/* +* Registers Fields +*/ + + +/**** CPUs_Config register ****/ +/* Disable broadcast of barrier onto system bus */ +#define NB_GLOBAL_CPUS_CONFIG_SYSBARDISABLE (1 << 0) +/* Enable broadcast of inner shareable transactions from CPUs */ +#define NB_GLOBAL_CPUS_CONFIG_BROADCASTINNER (1 << 1) +/* Disable broadcast of cache maintanance system bus */ +#define NB_GLOBAL_CPUS_CONFIG_BROADCASTCACHEMAINT (1 << 2) +/* Enable broadcast of outer shareable transactions from CPUs */ +#define NB_GLOBAL_CPUS_CONFIG_BROADCASTOUTER (1 << 3) +/* Defines the internal CPU GIC operating frequency ratio with t ... */ +#define NB_GLOBAL_CPUS_CONFIG_PERIPHCLKEN_MASK 0x00000030 +#define NB_GLOBAL_CPUS_CONFIG_PERIPHCLKEN_SHIFT 4 + +/**** CPUs_Secure register ****/ +/* dbgen +Write once. */ +#define NB_GLOBAL_CPUS_SECURE_DBGEN (1 << 0) +/* niden +Write once. */ +#define NB_GLOBAL_CPUS_SECURE_NIDEN (1 << 1) +/* spiden +Write once. */ +#define NB_GLOBAL_CPUS_SECURE_SPIDEN (1 << 2) +/* spniden +Write once. */ +#define NB_GLOBAL_CPUS_SECURE_SPNIDEN (1 << 3) +/* Disable write access to some secure GIC registers */ +#define NB_GLOBAL_CPUS_SECURE_CFGSDISABLE (1 << 4) + +/**** CPUs_Init_Control register ****/ +/* CPU Init DoneSpecifies which CPUs' inits are done and can exi ... */ +#define NB_GLOBAL_CPUS_INIT_CONTROL_CPUS_INITDONE_MASK 0x0000000F +#define NB_GLOBAL_CPUS_INIT_CONTROL_CPUS_INITDONE_SHIFT 0 +/* DBGPWRDNREQ MaskWhen CPU does not exist, its dbgpwrdnreq must ... */ +#define NB_GLOBAL_CPUS_INIT_CONTROL_DBGPWRDNREQ_MASK_MASK 0x000000F0 +#define NB_GLOBAL_CPUS_INIT_CONTROL_DBGPWRDNREQ_MASK_SHIFT 4 +/* Force CPU init power-on-reset exit. +For debug purposes only. */ +#define NB_GLOBAL_CPUS_INIT_CONTROL_FORCE_CPUPOR_MASK 0x00000F00 +#define NB_GLOBAL_CPUS_INIT_CONTROL_FORCE_CPUPOR_SHIFT 8 + +/**** CPUs_Init_Status register ****/ +/* Specifies which CPUs are enabled in the device configurations ... */ +#define NB_GLOBAL_CPUS_INIT_STATUS_CPUS_EXIST_MASK 0x0000000F +#define NB_GLOBAL_CPUS_INIT_STATUS_CPUS_EXIST_SHIFT 0 + +/**** NB_Int_Cause register ****/ +/* + * Each bit corresponds to an IRQ. 
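
/*
 * The register map above is meant to be overlaid on the mapped NB base;
 * individual fields are then extracted with the MASK/SHIFT pairs that
 * follow. A sketch of the pattern (the `nb_base` pointer is illustrative;
 * mapping it is platform code's job):
 */
static inline unsigned int nb_cpus_config_periphclken(void __iomem *nb_base)
{
	struct al_nb_regs __iomem *nb = (struct al_nb_regs __iomem *)nb_base;
	uint32_t val = al_reg_read32(&nb->global.cpus_config);

	return (val & NB_GLOBAL_CPUS_CONFIG_PERIPHCLKEN_MASK) >>
		NB_GLOBAL_CPUS_CONFIG_PERIPHCLKEN_SHIFT;
}
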
+ * value is 1 for level irq, 0 for trigger irq + * Level IRQ indices: 12-13, 23, 24, 26-29 + */ +#define NB_GLOBAL_NB_INT_CAUSE_LEVEL_IRQ_MASK 0x3D803000 +/* Cross trigger interrupt */ +#define NB_GLOBAL_NB_INT_CAUSE_NCTIIRQ_MASK 0x0000000F +#define NB_GLOBAL_NB_INT_CAUSE_NCTIIRQ_SHIFT 0 +/* Communications channel receive */ +#define NB_GLOBAL_NB_INT_CAUSE_COMMRX_MASK 0x000000F0 +#define NB_GLOBAL_NB_INT_CAUSE_COMMRX_SHIFT 4 +/* Communication channel transmit */ +#define NB_GLOBAL_NB_INT_CAUSE_COMMTX_MASK 0x00000F00 +#define NB_GLOBAL_NB_INT_CAUSE_COMMTX_SHIFT 8 +/* Emulation write fifo log has valid entry */ +#define NB_GLOBAL_NB_INT_CAUSE_PCIE_LOG_FIFO_VALID_0 (1 << 12) +/* Write logging FIFO wrap occurred */ +#define NB_GLOBAL_NB_INT_CAUSE_WR_LOG_FIFO_WRAP_M0 (1 << 13) +/* Emulation write fifo log has valid entry */ +#define NB_GLOBAL_NB_INT_CAUSE_PCIE_LOG_FIFO_VALID_1_A0 (1 << 13) +/* Write logging FIFO is full */ +#define NB_GLOBAL_NB_INT_CAUSE_WR_LOG_FIFO_FULL_M0 (1 << 14) +/* Reserved, read undefined must write as zeros. */ +#define NB_GLOBAL_NB_INT_CAUSE_RESERVED_15_15 (1 << 15) +/* Error indicator for AXI write transactions with a BRESP error ... */ +#define NB_GLOBAL_NB_INT_CAUSE_CPU_AXIERRIRQ (1 << 16) +/* Error indicator for: L2 RAM double-bit ECC error, illegal wri ... */ +#define NB_GLOBAL_NB_INT_CAUSE_CPU_INTERRIRQ (1 << 17) +/* Coherent fabric error summary interrupt */ +#define NB_GLOBAL_NB_INT_CAUSE_ACF_ERRORIRQ (1 << 18) +/* DDR Controller ECC Correctable error summary interrupt */ +#define NB_GLOBAL_NB_INT_CAUSE_MCTL_ECC_CORR_ERR (1 << 19) +/* DDR Controller ECC Uncorrectable error summary interrupt */ +#define NB_GLOBAL_NB_INT_CAUSE_MCTL_ECC_UNCORR_ERR (1 << 20) +/* DRAM parity error interrupt */ +#define NB_GLOBAL_NB_INT_CAUSE_MCTL_PARITY_ERR (1 << 21) +/* Reserved, not functional */ +#define NB_GLOBAL_NB_INT_CAUSE_MCTL_WDATARAM_PAR (1 << 22) +/* Error cause summary interrupt */ +#define NB_GLOBAL_NB_INT_CAUSE_ERR_CAUSE_SUM_A0 (1 << 23) +/* SB PoS error */ +#define NB_GLOBAL_NB_INT_CAUSE_SB_POS_ERR (1 << 24) +/* Received msix is not mapped to local GIC or IO-GIC spin */ +#define NB_GLOBAL_NB_INT_CAUSE_MSIX_ERR_INT_M0 (1 << 25) +/* Coresight timestamp overflow */ +#define NB_GLOBAL_NB_INT_CAUSE_CORESIGHT_TS_OVERFLOW_M0 (1 << 26) +/* Emulation write fifo log is wrapped */ +#define NB_GLOBAL_NB_INT_CAUSE_WR_LOG_FIFO_WRAP_A0 (1 << 26) +/* Write data parity error from SB channel 0. */ +#define NB_GLOBAL_NB_INT_CAUSE_SB0_WRDATA_PERR_M0 (1 << 27) +/* Emulation write fifo log is full (new pushes might corrupt da ... */ +#define NB_GLOBAL_NB_INT_CAUSE_WR_LOG_FIFO_FULL_A0 (1 << 27) +/* Write data parity error from SB channel 1. */ +#define NB_GLOBAL_NB_INT_CAUSE_SB1_WRDATA_PERR_M0 (1 << 28) +/* Emulation write fifo log is wrapped */ +#define NB_GLOBAL_NB_INT_CAUSE_WR_LOG_FIFO_WRAP_1_A0 (1 << 28) +/* Read data parity error from SB slaves. */ +#define NB_GLOBAL_NB_INT_CAUSE_SB_SLV_RDATA_PERR_M0 (1 << 29) +/* Emulation write fifo log is full (new pushes might corrupt da ... 
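
/*
 * A small helper one might layer on the level/edge annotation above:
 * nonzero means the given NB_Int_Cause bit is one of the level-sensitive
 * sources (bits 12-13, 23, 24 and 26-29, per the mask).
 */
static inline int nb_int_cause_is_level(uint32_t cause_bit)
{
	return (cause_bit & NB_GLOBAL_NB_INT_CAUSE_LEVEL_IRQ_MASK) != 0;
}
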
*/ +#define NB_GLOBAL_NB_INT_CAUSE_WR_LOG_FIFO_FULL_1_A0 (1 << 29) +/* PCIe read latched */ +#define NB_GLOBAL_NB_INT_CAUSE_RD_LOG_SET_0 (1 << 30) +/* PCIe read latched */ +#define NB_GLOBAL_NB_INT_CAUSE_RD_LOG_SET_1_A0 (1 << 31) + +/**** SEV_Int_Cause register ****/ +/* SMMU 0/1 global non-secure fault interrupt */ +#define NB_GLOBAL_SEV_INT_CAUSE_SMMU_GBL_FLT_IRPT_NS_MASK 0x00000003 +#define NB_GLOBAL_SEV_INT_CAUSE_SMMU_GBL_FLT_IRPT_NS_SHIFT 0 +/* SMMU 0/1 non-secure context interrupt */ +#define NB_GLOBAL_SEV_INT_CAUSE_SMMU_CXT_IRPT_NS_MASK 0x0000000C +#define NB_GLOBAL_SEV_INT_CAUSE_SMMU_CXT_IRPT_NS_SHIFT 2 +/* SMMU0/1 Non-secure configurtion acess fault interrupt */ +#define NB_GLOBAL_SEV_INT_CAUSE_SMMU_CFG_FLT_IRPT_S_MASK 0x00000030 +#define NB_GLOBAL_SEV_INT_CAUSE_SMMU_CFG_FLT_IRPT_S_SHIFT 4 +/* Reserved. Read undefined; must write as zeros. */ +#define NB_GLOBAL_SEV_INT_CAUSE_RESERVED_11_6_MASK 0x00000FC0 +#define NB_GLOBAL_SEV_INT_CAUSE_RESERVED_11_6_SHIFT 6 +/* PCIe emulation: inbound writes fifo has valid entry */ +#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_LOG_FIFO_VALID_0 (1 << 12) +/* PCIe emulation: inbound writes fifo has being wrapped (tail p ... */ +#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_LOG_FIFO_WRAP_0 (1 << 13) +/* PCIe emulation: inbound writes fifo is full */ +#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_LOG_FIFO_FULL_0 (1 << 14) +/* PCIe emulation: inbound writes fifo has valid entry */ +#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_LOG_FIFO_VALID_1 (1 << 15) +/* PCIe emulation: inbound writes fifo has being wrapped (tail p ... */ +#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_LOG_FIFO_WRAP_1 (1 << 16) +/* PCIe emulation: inbound writes fifo is full */ +#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_LOG_FIFO_FULL_1 (1 << 17) +/* PCIe emulation: inbound pcie read is latched */ +#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_RD_LOG_SET_0 (1 << 18) +/* PCIe emulation: inbound pcie read is latched */ +#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_RD_LOG_SET_1 (1 << 19) +/* Reserved. Read undefined; must write as zeros. */ +#define NB_GLOBAL_SEV_INT_CAUSE_RESERVED_31_20_MASK 0xFFF00000 +#define NB_GLOBAL_SEV_INT_CAUSE_RESERVED_31_20_SHIFT 20 + +/**** PMUs_Int_Cause register ****/ +/* CPUs PMU Overflow interrupt */ +#define NB_GLOBAL_PMUS_INT_CAUSE_CPUS_OVFL_MASK 0x0000000F +#define NB_GLOBAL_PMUS_INT_CAUSE_CPUS_OVFL_SHIFT 0 +/* Northbridge PMU overflow */ +#define NB_GLOBAL_PMUS_INT_CAUSE_NB_OVFL (1 << 4) +/* Memory Controller PMU overflow */ +#define NB_GLOBAL_PMUS_INT_CAUSE_MCTL_OVFL (1 << 5) +/* Coherency Interconnect PMU overflow */ +#define NB_GLOBAL_PMUS_INT_CAUSE_CCI_OVFL_MASK 0x000007C0 +#define NB_GLOBAL_PMUS_INT_CAUSE_CCI_OVFL_SHIFT 6 +/* Coherency Interconnect PMU overflow */ +#define NB_GLOBAL_PMUS_INT_CAUSE_SMMU_OVFL_MASK 0x00001800 +#define NB_GLOBAL_PMUS_INT_CAUSE_SMMU_OVFL_SHIFT 11 +/* Reserved. Read undefined; must write as zeros. */ +#define NB_GLOBAL_PMUS_INT_CAUSE_RESERVED_23_13_MASK 0x00FFE000 +#define NB_GLOBAL_PMUS_INT_CAUSE_RESERVED_23_13_SHIFT 13 +/* Southbridge PMUs overflow */ +#define NB_GLOBAL_PMUS_INT_CAUSE_SB_PMUS_OVFL_MASK 0xFF000000 +#define NB_GLOBAL_PMUS_INT_CAUSE_SB_PMUS_OVFL_SHIFT 24 + +/**** CPUs_Hold_Reset register ****/ +/* Shared L2 memory system, interrupt controller and timer logic ... */ +#define NB_GLOBAL_CPUS_HOLD_RESET_L2RESET (1 << 0) +/* Shared debug domain reset */ +#define NB_GLOBAL_CPUS_HOLD_RESET_PRESETDBG (1 << 1) +/* Individual CPU debug, PTM, watchpoint and breakpoint logic re ... 
*/ +#define NB_GLOBAL_CPUS_HOLD_RESET_CPU_DBGRESET_MASK 0x000000F0 +#define NB_GLOBAL_CPUS_HOLD_RESET_CPU_DBGRESET_SHIFT 4 +/* Individual CPU core and VFP/NEON logic reset */ +#define NB_GLOBAL_CPUS_HOLD_RESET_CPU_CORERESET_MASK 0x00000F00 +#define NB_GLOBAL_CPUS_HOLD_RESET_CPU_CORERESET_SHIFT 8 +/* Individual CPU por-on-reset */ +#define NB_GLOBAL_CPUS_HOLD_RESET_CPU_PORESET_MASK 0x0000F000 +#define NB_GLOBAL_CPUS_HOLD_RESET_CPU_PORESET_SHIFT 12 +/* Wait for interrupt mask */ +#define NB_GLOBAL_CPUS_HOLD_RESET_WFI_MASK_MASK 0x000F0000 +#define NB_GLOBAL_CPUS_HOLD_RESET_WFI_MASK_SHIFT 16 + +/**** CPUs_Software_Reset register ****/ +/* Write 1. Apply the software reset. */ +#define NB_GLOBAL_CPUS_SOFTWARE_RESET_SWRESET_REQ (1 << 0) +/* Defines the level of software reset. */ +#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_MASK 0x0000000E +#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_SHIFT 1 +/* Individual CPU core reset. */ +#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_CPU_CORE \ + (0x0 << NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_SHIFT) +/* Individual CPU power-on-reset. */ +#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_CPU_PORESET \ + (0x1 << NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_SHIFT) +/* Individual CPU debug reset. */ +#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_CPU_DBG \ + (0x2 << NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_SHIFT) +/* A Cluster reset puts each core into core reset (no dbg) and a ... */ +#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_CLUSTER_NO_DBG \ + (0x3 << NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_SHIFT) +/* A Cluster reset puts each core into power-on-reset and also r ... */ +#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_CLUSTER \ + (0x4 << NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_SHIFT) +/* A Cluster power-on-reset puts each core into power-on-reset a ... */ +#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_CLUSTER_PORESET \ + (0x5 << NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_SHIFT) +/* Defines which cores to reset when no cluster_poreset is reque ... */ +#define NB_GLOBAL_CPUS_SOFTWARE_RESET_CORES_MASK 0x000000F0 +#define NB_GLOBAL_CPUS_SOFTWARE_RESET_CORES_SHIFT 4 +/* CPUn wait for interrupt enable */ +#define NB_GLOBAL_CPUS_SOFTWARE_RESET_WFI_MASK_MASK 0x000F0000 +#define NB_GLOBAL_CPUS_SOFTWARE_RESET_WFI_MASK_SHIFT 16 + +/**** WD_Timer0_Reset register ****/ +/* Shared L2 memory system, interrupt controller and timer logic ... */ +#define NB_GLOBAL_WD_TIMER0_RESET_L2RESET (1 << 0) +/* Shared debug domain reset */ +#define NB_GLOBAL_WD_TIMER0_RESET_PRESETDBG (1 << 1) +/* Individual CPU debug PTM, watchpoint and breakpoint logic res ... */ +#define NB_GLOBAL_WD_TIMER0_RESET_CPU_DBGRESET_MASK 0x000000F0 +#define NB_GLOBAL_WD_TIMER0_RESET_CPU_DBGRESET_SHIFT 4 +/* Individual CPU core and VFP/NEON logic reset */ +#define NB_GLOBAL_WD_TIMER0_RESET_CPU_CORERESET_MASK 0x00000F00 +#define NB_GLOBAL_WD_TIMER0_RESET_CPU_CORERESET_SHIFT 8 +/* Individual CPU por-on-reset */ +#define NB_GLOBAL_WD_TIMER0_RESET_CPU_PORESET_MASK 0x0000F000 +#define NB_GLOBAL_WD_TIMER0_RESET_CPU_PORESET_SHIFT 12 + +/**** WD_Timer1_Reset register ****/ +/* Shared L2 memory system, interrupt controller and timer logic ... */ +#define NB_GLOBAL_WD_TIMER1_RESET_L2RESET (1 << 0) +/* Shared debug domain reset */ +#define NB_GLOBAL_WD_TIMER1_RESET_PRESETDBG (1 << 1) +/* Individual CPU debug PTM, watchpoint and breakpoint logic res ... 
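
/*
 * Sketch of requesting a per-core software reset with the
 * CPUs_Software_Reset fields above: choose a level, select the core,
 * then set the request bit. A single combined write is assumed to be
 * sufficient; `nb` is the mapped al_nb_regs block.
 */
static inline void nb_cpu_sw_reset_core(struct al_nb_regs __iomem *nb,
					unsigned int core)
{
	uint32_t val = NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_CPU_CORE;

	val |= (1 << core) << NB_GLOBAL_CPUS_SOFTWARE_RESET_CORES_SHIFT;
	val |= NB_GLOBAL_CPUS_SOFTWARE_RESET_SWRESET_REQ;
	al_reg_write32(&nb->global.cpus_software_reset, val);
}
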
*/ +#define NB_GLOBAL_WD_TIMER1_RESET_CPU_DBGRESET_MASK 0x000000F0 +#define NB_GLOBAL_WD_TIMER1_RESET_CPU_DBGRESET_SHIFT 4 +/* Individual CPU core and VFP/NEON logic reset */ +#define NB_GLOBAL_WD_TIMER1_RESET_CPU_CORERESET_MASK 0x00000F00 +#define NB_GLOBAL_WD_TIMER1_RESET_CPU_CORERESET_SHIFT 8 +/* Individual CPU por-on-reset */ +#define NB_GLOBAL_WD_TIMER1_RESET_CPU_PORESET_MASK 0x0000F000 +#define NB_GLOBAL_WD_TIMER1_RESET_CPU_PORESET_SHIFT 12 + +/**** WD_Timer2_Reset register ****/ +/* Shared L2 memory system, interrupt controller and timer logic ... */ +#define NB_GLOBAL_WD_TIMER2_RESET_L2RESET (1 << 0) +/* Shared debug domain reset */ +#define NB_GLOBAL_WD_TIMER2_RESET_PRESETDBG (1 << 1) +/* Individual CPU debug, PTM, watchpoint and breakpoint logic re ... */ +#define NB_GLOBAL_WD_TIMER2_RESET_CPU_DBGRESET_MASK 0x000000F0 +#define NB_GLOBAL_WD_TIMER2_RESET_CPU_DBGRESET_SHIFT 4 +/* Individual CPU core and VFP/NEON logic reset */ +#define NB_GLOBAL_WD_TIMER2_RESET_CPU_CORERESET_MASK 0x00000F00 +#define NB_GLOBAL_WD_TIMER2_RESET_CPU_CORERESET_SHIFT 8 +/* Individual CPU por-on-reset */ +#define NB_GLOBAL_WD_TIMER2_RESET_CPU_PORESET_MASK 0x0000F000 +#define NB_GLOBAL_WD_TIMER2_RESET_CPU_PORESET_SHIFT 12 + +/**** WD_Timer3_Reset register ****/ +/* Shared L2 memory system, interrupt controller and timer logic ... */ +#define NB_GLOBAL_WD_TIMER3_RESET_L2RESET (1 << 0) +/* Shared debug domain reset */ +#define NB_GLOBAL_WD_TIMER3_RESET_PRESETDBG (1 << 1) +/* Individual CPU debug, PTM, watchpoint and breakpoint logic re ... */ +#define NB_GLOBAL_WD_TIMER3_RESET_CPU_DBGRESET_MASK 0x000000F0 +#define NB_GLOBAL_WD_TIMER3_RESET_CPU_DBGRESET_SHIFT 4 +/* Individual CPU core and VFP/NEON logic reset */ +#define NB_GLOBAL_WD_TIMER3_RESET_CPU_CORERESET_MASK 0x00000F00 +#define NB_GLOBAL_WD_TIMER3_RESET_CPU_CORERESET_SHIFT 8 +/* Individual CPU por-on-reset */ +#define NB_GLOBAL_WD_TIMER3_RESET_CPU_PORESET_MASK 0x0000F000 +#define NB_GLOBAL_WD_TIMER3_RESET_CPU_PORESET_SHIFT 12 + +/**** DDRC_Hold_Reset register ****/ +/* DDR Control and PHY memory mapped registers reset control0 - ... */ +#define NB_GLOBAL_DDRC_HOLD_RESET_APB_SYNC_RESET (1 << 0) +/* DDR Control Core reset control0 - Reset is deasserted */ +#define NB_GLOBAL_DDRC_HOLD_RESET_CORE_SYNC_RESET (1 << 1) +/* DDR Control AXI Interface reset control0 - Reset is deasserte ... */ +#define NB_GLOBAL_DDRC_HOLD_RESET_AXI_SYNC_RESET (1 << 2) +/* DDR PUB Controller reset control0 - Reset is deasserted */ +#define NB_GLOBAL_DDRC_HOLD_RESET_PUB_CTL_SYNC_RESET (1 << 3) +/* DDR PUB SDR Controller reset control0 - Reset is deasserted */ +#define NB_GLOBAL_DDRC_HOLD_RESET_PUB_SDR_SYNC_RESET (1 << 4) +/* DDR PHY reset control0 - Reset is deasserted */ +#define NB_GLOBAL_DDRC_HOLD_RESET_PHY_SYNC_RESET (1 << 5) +/* Memory initialization input to DDR SRAM for parity check supp ... */ +#define NB_GLOBAL_DDRC_HOLD_RESET_DDR_UNIT_MEM_INIT (1 << 6) + +/**** Fabric_Software_Reset register ****/ +/* Write 1 apply the software reset. */ +#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_SWRESET_REQ (1 << 0) +/* Defines the level of software reset. 
*/ +#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_MASK 0x0000000E +#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_SHIFT 1 +/* Fabric reset */ +#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_FABRIC \ + (0x0 << NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_SHIFT) +/* GIC reset */ +#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_GIC \ + (0x1 << NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_SHIFT) +/* SMMU reset */ +#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_SMMU \ + (0x2 << NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_SHIFT) +/* CPUn waiting for interrupt enable */ +#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_WFI_MASK_MASK 0x000F0000 +#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_WFI_MASK_SHIFT 16 + +/**** CPUs_Power_Ctrl register ****/ +/* L2 WFI enableWhen all the processors are in WFI mode or power ... */ +#define NB_GLOBAL_CPUS_POWER_CTRL_L2WFI_EN (1 << 0) +/* L2 WFI status */ +#define NB_GLOBAL_CPUS_POWER_CTRL_L2WFI_STATUS (1 << 1) +/* L2 RAMs Power DownPower down the L2 RAMs */ +#define NB_GLOBAL_CPUS_POWER_CTRL_L2RAMS_PWRDN_EN (1 << 2) +/* L2 RAMs power down status */ +#define NB_GLOBAL_CPUS_POWER_CTRL_L2RAMS_PWRDN_STATUS (1 << 3) +/* CPU state condition to enable L2 RAM power down0 - Power down ... */ +#define NB_GLOBAL_CPUS_POWER_CTRL_L2RAMS_PWRDN_CPUS_STATE_MASK 0x000000F0 +#define NB_GLOBAL_CPUS_POWER_CTRL_L2RAMS_PWRDN_CPUS_STATE_SHIFT 4 +/* Enable external debugger over power-down */ +#define NB_GLOBAL_CPUS_POWER_CTRL_EXT_DEBUGGER_OVER_PD_EN (1 << 8) +/* force wakeup the CPU in L2RAM powedwnINTERNAL DEBUG PURPOSE O ... */ +#define NB_GLOBAL_CPUS_POWER_CTRL_FORCE_CPUS_OK_PWRUP (1 << 27) +/* L2 RAMs power down SM status */ +#define NB_GLOBAL_CPUS_POWER_CTRL_L2RAMS_PWRDN_SM_STATUS_MASK 0xF0000000 +#define NB_GLOBAL_CPUS_POWER_CTRL_L2RAMS_PWRDN_SM_STATUS_SHIFT 28 + +/**** ACF_Base_High register ****/ +/* Coherency Fabric registers base [39:32]. */ +#define NB_GLOBAL_ACF_BASE_HIGH_BASE_39_32_MASK 0x000000FF +#define NB_GLOBAL_ACF_BASE_HIGH_BASE_39_32_SHIFT 0 +/* Coherency Fabric registers base [31:15] */ +#define NB_GLOBAL_ACF_BASE_LOW_BASED_31_15_MASK 0xFFFF8000 +#define NB_GLOBAL_ACF_BASE_LOW_BASED_31_15_SHIFT 15 + +/**** ACF_Control_Override register ****/ +/* Override the AWCACHE[0] and ARCACHE[0] outputs to benon-buffe ... */ +#define NB_GLOBAL_ACF_CONTROL_OVERRIDE_BUFFOVRD_MASK 0x00000007 +#define NB_GLOBAL_ACF_CONTROL_OVERRIDE_BUFFOVRD_SHIFT 0 +/* Overrides the ARQOS and AWQOS input signals */ +#define NB_GLOBAL_ACF_CONTROL_OVERRIDE_QOSOVRD_MASK 0x000000F8 +#define NB_GLOBAL_ACF_CONTROL_OVERRIDE_QOSOVRD_SHIFT 3 +/* If LOW, then AC requests are never issued on the correspondin ... */ +#define NB_GLOBAL_ACF_CONTROL_OVERRIDE_ACE_CH_EN_MASK 0x00001F00 +#define NB_GLOBAL_ACF_CONTROL_OVERRIDE_ACE_CH_EN_SHIFT 8 +/* Internal register:Enables 4k hazard of post-barrier vs pre-ba ... 
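
/*
 * Sketch of a fabric-level software reset using the
 * Fabric_Software_Reset fields above. Whether the request bit
 * self-clears is not documented here, so the sketch performs a single
 * write only.
 */
static inline void nb_fabric_sw_reset(struct al_nb_regs __iomem *nb)
{
	al_reg_write32(&nb->global.fabric_software_reset,
		       NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_FABRIC |
		       NB_GLOBAL_FABRIC_SOFTWARE_RESET_SWRESET_REQ);
}
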
*/ +#define NB_GLOBAL_ACF_CONTROL_OVERRIDE_DMB_4K_HAZARD_EN (1 << 13) + +/**** LGIC_Base_High register ****/ +/* GIC registers base [39:32] */ +#define NB_GLOBAL_LGIC_BASE_HIGH_BASE_39_32_MASK 0x000000FF +#define NB_GLOBAL_LGIC_BASE_HIGH_BASE_39_32_SHIFT 0 +/* GIC registers base [31:15] */ +#define NB_GLOBAL_LGIC_BASE_LOW_BASED_31_15_MASK 0xFFFF8000 +#define NB_GLOBAL_LGIC_BASE_LOW_BASED_31_15_SHIFT 15 + +/**** IOGIC_Base_High register ****/ +/* IOGIC registers base [39:32] */ +#define NB_GLOBAL_IOGIC_BASE_HIGH_BASE_39_32_MASK 0x000000FF +#define NB_GLOBAL_IOGIC_BASE_HIGH_BASE_39_32_SHIFT 0 +/* IOGIC registers base [31:15] */ +#define NB_GLOBAL_IOGIC_BASE_LOW_BASED_31_15_MASK 0xFFFF8000 +#define NB_GLOBAL_IOGIC_BASE_LOW_BASED_31_15_SHIFT 15 + +/**** IO_Wr_Split_Control register ****/ +/* Write splitters bypass */ +#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_BYPASS_MASK 0x00000003 +#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_BYPASS_SHIFT 0 +/* Write splitters store and forward */ +#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_ST_FW_MASK 0x0000000C +#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_ST_FW_SHIFT 2 +/* Write splitters unmodify snoop type */ +#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_UNMODIFY_SNP_MASK 0x00000030 +#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_UNMODIFY_SNP_SHIFT 4 +/* Write splitters unsplit non-coherent access */ +#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_UNSPLIT_NOSNP_MASK 0x000000C0 +#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_UNSPLIT_NOSNP_SHIFT 6 +/* Write splitter rate limit. */ +#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR0_SPLT_RATE_LIMIT_MASK 0x00001F00 +#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR0_SPLT_RATE_LIMIT_SHIFT 8 +/* Write splitter rate limit */ +#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR1_SPLT_RATE_LIMIT_MASK 0x0003E000 +#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR1_SPLT_RATE_LIMIT_SHIFT 13 +/* Clear is not supported */ +#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_CLEAR_MASK 0xC0000000 +#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_CLEAR_SHIFT 30 + +/**** IO_Rd_ROB_Control register ****/ +/* Read ROB Bypass[0] Rd ROB 0 bypass enable */ +#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD_ROB_BYPASS_MASK 0x00000003 +#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD_ROB_BYPASS_SHIFT 0 +/* Read ROB in order */ +#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD_ROB_INORDER_MASK 0x0000000C +#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD_ROB_INORDER_SHIFT 2 +/* Read splitter rate limit */ +#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD0_ROB_RATE_LIMIT_MASK 0x00001F00 +#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD0_ROB_RATE_LIMIT_SHIFT 8 +/* Read splitter rate limit */ +#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD1_ROB_RATE_LIMIT_MASK 0x0003E000 +#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD1_ROB_RATE_LIMIT_SHIFT 13 + +/**** SB_PoS_Error_Log_1 register ****/ +/* Error Log 1[7:0] address_high[16:8] request id[18:17] bresp ... */ +#define NB_GLOBAL_SB_POS_ERROR_LOG_1_ERR_LOG_MASK 0x7FFFFFFF +#define NB_GLOBAL_SB_POS_ERROR_LOG_1_ERR_LOG_SHIFT 0 +/* Valid logged errorSet on SB PoS error occurance on capturing ... */ +#define NB_GLOBAL_SB_POS_ERROR_LOG_1_VALID (1 << 31) + +/**** MSIx_Error_Log register ****/ +/* Error Log +Corresponds to MSIx address message [30:0]. 
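
/*
 * Sketch of sampling the SB PoS error log: the valid bit gates the
 * 31-bit payload (address-high / request id / bresp, per the field
 * description above). Clearing semantics are not documented here, so
 * this sketch only reads.
 */
static inline int nb_sb_pos_error_get(struct al_nb_regs __iomem *nb,
				      uint32_t *log)
{
	uint32_t v = al_reg_read32(&nb->global.sb_pos_error_log_1);

	if (!(v & NB_GLOBAL_SB_POS_ERROR_LOG_1_VALID))
		return 0;

	*log = (v & NB_GLOBAL_SB_POS_ERROR_LOG_1_ERR_LOG_MASK) >>
		NB_GLOBAL_SB_POS_ERROR_LOG_1_ERR_LOG_SHIFT;
	return 1;
}
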
*/ +#define NB_GLOBAL_MSIX_ERROR_LOG_ERR_LOG_MASK 0x7FFFFFFF +#define NB_GLOBAL_MSIX_ERROR_LOG_ERR_LOG_SHIFT 0 +/* Valid logged error */ +#define NB_GLOBAL_MSIX_ERROR_LOG_VALID (1 << 31) + +/**** Error_Cause register ****/ +/* PCIe emulation: inbound pcie read latch timeout */ +#define NB_GLOBAL_ERROR_CAUSE_PCIE_RD_LOG_0_TIMEOUT (1 << 0) +/* PCIe emulation: inbound pcie read latch timeout */ +#define NB_GLOBAL_ERROR_CAUSE_PCIE_RD_LOG_1_TIMEOUT (1 << 1) +/* Received msix is not mapped to local GIC or IO-GIC spin */ +#define NB_GLOBAL_ERROR_CAUSE_MSIX_ERR_INT (1 << 2) +/* Coresight timestamp overflow */ +#define NB_GLOBAL_ERROR_CAUSE_CORESIGHT_TS_OVERFLOW (1 << 3) +/* Write data parity error from SB channel 0. */ +#define NB_GLOBAL_ERROR_CAUSE_SB0_WRDATA_PERR (1 << 4) +/* Write data parity error from SB channel 1. */ +#define NB_GLOBAL_ERROR_CAUSE_SB1_WRDATA_PERR (1 << 5) +/* Read data parity error from SB slaves. */ +#define NB_GLOBAL_ERROR_CAUSE_SB_SLV_RDATA_PERR (1 << 6) +/* Reserved. Read undefined; must write as zeros. */ +#define NB_GLOBAL_ERROR_CAUSE_RESERVED_31_7_MASK 0xFFFFFF80 +#define NB_GLOBAL_ERROR_CAUSE_RESERVED_31_7_SHIFT 7 + +/**** QoS_Peak_Control register ****/ +/* Peak Read Low ThresholdWhen the number of outstanding read tr ... */ +#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_RD_L_THRESHOLD_MASK 0x0000007F +#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_RD_L_THRESHOLD_SHIFT 0 +/* Peak Read High ThresholdWhen the number of outstanding read t ... */ +#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_RD_H_THRESHOLD_MASK 0x00007F00 +#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_RD_H_THRESHOLD_SHIFT 8 +/* Peak Write Low ThresholdWhen the number of outstanding write ... */ +#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_WR_L_THRESHOLD_MASK 0x007F0000 +#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_WR_L_THRESHOLD_SHIFT 16 +/* Peak Write High ThresholdWhen the number of outstanding write ... */ +#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_WR_H_THRESHOLD_MASK 0x7F000000 +#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_WR_H_THRESHOLD_SHIFT 24 + +/**** QoS_Set_Control register ****/ +/* CPU Low priority Read QoS */ +#define NB_GLOBAL_QOS_SET_CONTROL_CPU_LP_ARQOS_MASK 0x0000000F +#define NB_GLOBAL_QOS_SET_CONTROL_CPU_LP_ARQOS_SHIFT 0 +/* CPU High priority Read QoS */ +#define NB_GLOBAL_QOS_SET_CONTROL_CPU_HP_ARQOS_MASK 0x000000F0 +#define NB_GLOBAL_QOS_SET_CONTROL_CPU_HP_ARQOS_SHIFT 4 +/* CPU Low priority Write QoS */ +#define NB_GLOBAL_QOS_SET_CONTROL_CPU_LP_AWQOS_MASK 0x00000F00 +#define NB_GLOBAL_QOS_SET_CONTROL_CPU_LP_AWQOS_SHIFT 8 +/* CPU High priority Write QoS */ +#define NB_GLOBAL_QOS_SET_CONTROL_CPU_HP_AWQOS_MASK 0x0000F000 +#define NB_GLOBAL_QOS_SET_CONTROL_CPU_HP_AWQOS_SHIFT 12 +/* SB Low priority Read QoS */ +#define NB_GLOBAL_QOS_SET_CONTROL_SB_LP_ARQOS_MASK 0x000F0000 +#define NB_GLOBAL_QOS_SET_CONTROL_SB_LP_ARQOS_SHIFT 16 +/* SB Low-priority Write QoS */ +#define NB_GLOBAL_QOS_SET_CONTROL_SB_LP_AWQOS_MASK 0x00F00000 +#define NB_GLOBAL_QOS_SET_CONTROL_SB_LP_AWQOS_SHIFT 20 + +/**** DDR_QoS register ****/ +/* High Priority Read ThresholdLimits the number of outstanding ... */ +#define NB_GLOBAL_DDR_QOS_HIGH_PRIO_THRESHOLD_MASK 0x0000007F +#define NB_GLOBAL_DDR_QOS_HIGH_PRIO_THRESHOLD_SHIFT 0 +/* DDR Low Priority QoSFabric priority below this value is mappe ... */ +#define NB_GLOBAL_DDR_QOS_LP_QOS_MASK 0x00000F00 +#define NB_GLOBAL_DDR_QOS_LP_QOS_SHIFT 8 + +/**** ACF_Misc register ****/ +/* Disable DDR Write ChopPerformance optimitation feature to cho ... 
*/ +#define NB_GLOBAL_ACF_MISC_DDR_WR_CHOP_DIS (1 << 0) +/* Disable SB-2-SB path through NB fabric. */ +#define NB_GLOBAL_ACF_MISC_SB2SB_PATH_DIS (1 << 1) +/* Disable ETR tracing to non-DDR. */ +#define NB_GLOBAL_ACF_MISC_ETR2SB_PATH_DIS (1 << 2) +/* Disable ETR tracing to non-DDR. */ +#define NB_GLOBAL_ACF_MISC_CPU2MSIX_DIS (1 << 3) +/* Disable CPU generation of MSIx By default, the CPU can set an ... */ +#define NB_GLOBAL_ACF_MISC_MSIX_TERMINATE_DIS (1 << 4) +/* Disable snoop override for MSIxBy default, an MSIx transactio ... */ +#define NB_GLOBAL_ACF_MISC_MSIX_SNOOPOVRD_DIS (1 << 5) +/* POS bypass */ +#define NB_GLOBAL_ACF_MISC_POS_BYPASS (1 << 6) +/* PoS ReadStronglyOrdered enableSO read forces flushing of all ... */ +#define NB_GLOBAL_ACF_MISC_POS_RSO_EN (1 << 7) +/* WRAP to INC transfer enable */ +#define NB_GLOBAL_ACF_MISC_POS_WRAP2INC (1 << 8) +/* PoS DSB flush DisableOn DSB from CPU, PoS blocks the progress ... */ +#define NB_GLOBAL_ACF_MISC_POS_DSB_FLUSH_DIS (1 << 9) +/* PoS DMB Flush DisableOn DMB from CPU, the PoS blocks the prog ... */ +#define NB_GLOBAL_ACF_MISC_POS_DMB_FLUSH_DIS (1 << 10) +/* change DMB functionality to DSB (block and drain) */ +#define NB_GLOBAL_ACF_MISC_POS_DMB_TO_DSB_EN (1 << 11) +/* Disable write after read stall when accessing IO fabric slave ... */ +#define NB_GLOBAL_ACF_MISC_M0_WAR_STALL_DIS (1 << 12) +/* Disable write after read stall when accessing DDR */ +#define NB_GLOBAL_ACF_MISC_M1_WAR_STALL_DIS (1 << 13) +/* spare configuration bits[14]: disable pos change to disable/e ... */ +#define NB_GLOBAL_ACF_MISC_CONFIG_SPARE_MASK 0x1FFFC000 +#define NB_GLOBAL_ACF_MISC_CONFIG_SPARE_SHIFT 14 +/* Enable CPU WriteUnique to WriteNoSnoop trasform */ +#define NB_GLOBAL_ACF_MISC_CPU_WU2WNS_EN (1 << 29) +/* Disable device after device check */ +#define NB_GLOBAL_ACF_MISC_WR_POS_DEV_AFTER_DEV_DIS (1 << 30) +/* Disable wrap to inc on write */ +#define NB_GLOBAL_ACF_MISC_WR_INC2WRAP_EN (1 << 31) + +/**** Config_Bus_Control register ****/ +/* Write slave error enable */ +#define NB_GLOBAL_CONFIG_BUS_CONTROL_WR_SLV_ERR_EN (1 << 0) +/* Write decode error enable */ +#define NB_GLOBAL_CONFIG_BUS_CONTROL_WR_DEC_ERR_EN (1 << 1) +/* Read slave error enable */ +#define NB_GLOBAL_CONFIG_BUS_CONTROL_RD_SLV_ERR_EN (1 << 2) +/* Read decode error enable */ +#define NB_GLOBAL_CONFIG_BUS_CONTROL_RD_DEC_ERR_EN (1 << 3) +/* Ignore Write ID */ +#define NB_GLOBAL_CONFIG_BUS_CONTROL_IGNORE_WR_ID (1 << 4) +/* Timeout limit before terminating configuration bus access wit ... */ +#define NB_GLOBAL_CONFIG_BUS_CONTROL_TIMEOUT_LIMIT_MASK 0xFFFFFF00 +#define NB_GLOBAL_CONFIG_BUS_CONTROL_TIMEOUT_LIMIT_SHIFT 8 + +/**** Coresight_PD register ****/ +/* ETF0 RAM force power down */ +#define NB_GLOBAL_CORESIGHT_PD_ETF0_RAM_FORCE_PD (1 << 0) +/* ETF1 RAM force power down */ +#define NB_GLOBAL_CORESIGHT_PD_ETF1_RAM_FORCE_PD (1 << 1) +/* ETF0 RAM force clock gate */ +#define NB_GLOBAL_CORESIGHT_PD_ETF0_RAM_FORCE_CG (1 << 2) +/* ETF1 RAM force clock gate */ +#define NB_GLOBAL_CORESIGHT_PD_ETF1_RAM_FORCE_CG (1 << 3) +/* APBIC clock enable */ +#define NB_GLOBAL_CORESIGHT_PD_APBICLKEN (1 << 4) +/* DAP system clock enable */ +#define NB_GLOBAL_CORESIGHT_PD_DAP_SYS_CLKEN (1 << 5) + +/**** Coresight_INTERNAL_0 register ****/ + +#define NB_GLOBAL_CORESIGHT_INTERNAL_0_CTIAPBSBYPASS (1 << 0) +/* CA15 CTM and Coresight CTI operate at same clock, bypass mode ... */ +#define NB_GLOBAL_CORESIGHT_INTERNAL_0_CISBYPASS (1 << 1) +/* CA15 CTM and Coresight CTI operate according to the same cloc ... 
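
/*
 * Sketch of programming the config-bus timeout with the same
 * masked-write helper this patch uses elsewhere; `cycles` is
 * illustrative and must fit the 24-bit field.
 */
static inline void nb_cfg_bus_timeout_set(struct al_nb_regs __iomem *nb,
					  uint32_t cycles)
{
	al_reg_write32_masked(&nb->global.config_bus_control,
		NB_GLOBAL_CONFIG_BUS_CONTROL_TIMEOUT_LIMIT_MASK,
		cycles << NB_GLOBAL_CONFIG_BUS_CONTROL_TIMEOUT_LIMIT_SHIFT);
}
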
*/ +#define NB_GLOBAL_CORESIGHT_INTERNAL_0_CIHSBYPASS_MASK 0x0000003C +#define NB_GLOBAL_CORESIGHT_INTERNAL_0_CIHSBYPASS_SHIFT 2 + +/**** Coresight_DBGROMADDR register ****/ +/* Valid signal for DBGROMADDR. +Connected to DBGROMADDRV */ +#define NB_GLOBAL_CORESIGHT_DBGROMADDR_VALID (1 << 0) +/* Specifies bits [39:12] of the ROM table physical address. */ +#define NB_GLOBAL_CORESIGHT_DBGROMADDR_ADDR_39_12_MASK 0x3FFFFFFC +#define NB_GLOBAL_CORESIGHT_DBGROMADDR_ADDR_39_12_SHIFT 2 + +/**** Coresight_DBGSELFADDR register ****/ +/* Valid signal for DBGROMADDR. +Connected to DBGROMADDRV */ +#define NB_GLOBAL_CORESIGHT_DBGSELFADDR_VALID (1 << 0) +/* Specifies bits [18:17] of the two’s complement signed offset ... */ +#define NB_GLOBAL_CORESIGHT_DBGSELFADDR_ADDR_18_17_MASK 0x00000180 +#define NB_GLOBAL_CORESIGHT_DBGSELFADDR_ADDR_18_17_SHIFT 7 +/* Specifies bits [39:19] of the two’s complement signed offset ... */ +#define NB_GLOBAL_CORESIGHT_DBGSELFADDR_ADDR_39_19_MASK 0x3FFFFE00 +#define NB_GLOBAL_CORESIGHT_DBGSELFADDR_ADDR_39_19_SHIFT 9 + +/**** Cnt_Control register ****/ +/* System counter enable +Counter is enabled after reset. */ +#define NB_SYSTEM_COUNTER_CNT_CONTROL_EN (1 << 0) +/* System counter restartInitial value is reloaded from Counter_ ... */ +#define NB_SYSTEM_COUNTER_CNT_CONTROL_RESTART (1 << 1) +/* System counter tickSpecifies the counter tick rate relative t ... */ +#define NB_SYSTEM_COUNTER_CNT_CONTROL_SCALE_MASK 0x0000FF00 +#define NB_SYSTEM_COUNTER_CNT_CONTROL_SCALE_SHIFT 8 + +/**** CA15_RF_Misc register ****/ + +#define NB_RAMS_CONTROL_MISC_CA15_RF_MISC_NONECPU_RF_MISC_MASK 0x0000000F +#define NB_RAMS_CONTROL_MISC_CA15_RF_MISC_NONECPU_RF_MISC_SHIFT 0 + +#define NB_RAMS_CONTROL_MISC_CA15_RF_MISC_CPU_RF_MISC_MASK 0x00FFFF00 +#define NB_RAMS_CONTROL_MISC_CA15_RF_MISC_CPU_RF_MISC_SHIFT 8 +/* Pause for CPUs from the time all power is up to the time the ... */ +#define NB_RAMS_CONTROL_MISC_CA15_RF_MISC_PWR_UP_PAUSE_MASK 0xF8000000 +#define NB_RAMS_CONTROL_MISC_CA15_RF_MISC_PWR_UP_PAUSE_SHIFT 27 + +/**** NB_RF_Misc register ****/ +/* SMMU TLB RAMs force power down */ +#define NB_RAMS_CONTROL_MISC_NB_RF_MISC_SMMU_RAM_FORCE_PD (1 << 0) + +/**** Lockn register ****/ +/* Semaphore LockCPU reads it:If current value ==0, return 0 to ... 
*/ +#define NB_SEMAPHORES_LOCKN_LOCK (1 << 0) + +/**** CA15_outputs_1 register ****/ + +#define NB_DEBUG_CA15_OUTPUTS_1_STANDBYWFI_MASK 0x0000000F +#define NB_DEBUG_CA15_OUTPUTS_1_STANDBYWFI_SHIFT 0 + +#define NB_DEBUG_CA15_OUTPUTS_1_CPU_PWR_DN_ACK_MASK 0x000000F0 +#define NB_DEBUG_CA15_OUTPUTS_1_CPU_PWR_DN_ACK_SHIFT 4 + +#define NB_DEBUG_CA15_OUTPUTS_1_IRQOUT_N_MASK 0x00000F00 +#define NB_DEBUG_CA15_OUTPUTS_1_IRQOUT_N_SHIFT 8 + +#define NB_DEBUG_CA15_OUTPUTS_1_FIQOUT_N_MASK 0x0000F000 +#define NB_DEBUG_CA15_OUTPUTS_1_FIQOUT_N_SHIFT 12 + +#define NB_DEBUG_CA15_OUTPUTS_1_CNTHPIRQ_N_MASK 0x000F0000 +#define NB_DEBUG_CA15_OUTPUTS_1_CNTHPIRQ_N_SHIFT 16 + +#define NB_DEBUG_CA15_OUTPUTS_1_NCNTPNSIRQ_N_MASK 0x00F00000 +#define NB_DEBUG_CA15_OUTPUTS_1_NCNTPNSIRQ_N_SHIFT 20 + +#define NB_DEBUG_CA15_OUTPUTS_1_NCNTPSIRQ_N_MASK 0x0F000000 +#define NB_DEBUG_CA15_OUTPUTS_1_NCNTPSIRQ_N_SHIFT 24 + +#define NB_DEBUG_CA15_OUTPUTS_1_NCNTVIRQ_N_MASK 0xF0000000 +#define NB_DEBUG_CA15_OUTPUTS_1_NCNTVIRQ_N_SHIFT 28 + +/**** CA15_outputs_2 register ****/ + +#define NB_DEBUG_CA15_OUTPUTS_2_STANDBYWFIL2 (1 << 0) + +#define NB_DEBUG_CA15_OUTPUTS_2_L2RAM_PWR_DN_ACK (1 << 1) + +/**** cpu_msg register ****/ +/* status/ascii code */ +#define NB_DEBUG_CPU_MSG_STATUS_MASK 0x000000FF +#define NB_DEBUG_CPU_MSG_STATUS_SHIFT 0 +/* toggle with each ascii write */ +#define NB_DEBUG_CPU_MSG_ASCII_TOGGLE (1 << 8) +/* signals ascii */ +#define NB_DEBUG_CPU_MSG_ASCII (1 << 9) + +#define NB_DEBUG_CPU_MSG_RESERVED_11_10_MASK 0x00000C00 +#define NB_DEBUG_CPU_MSG_RESERVED_11_10_SHIFT 10 +/* Signals new section started in S/W */ +#define NB_DEBUG_CPU_MSG_SECTION_START (1 << 12) + +#define NB_DEBUG_CPU_MSG_RESERVED_13 (1 << 13) +/* Signals a single CPU is done. */ +#define NB_DEBUG_CPU_MSG_CPU_DONE (1 << 14) +/* Signals test is done */ +#define NB_DEBUG_CPU_MSG_TEST_DONE (1 << 15) + +/**** ddrc register ****/ +/* External DLL calibration request */ +#define NB_DEBUG_DDRC_DLL_CALIB_EXT_REQ (1 << 0) +/* External request to perform short (long isperformed during in ... */ +#define NB_DEBUG_DDRC_ZQ_SHORT_CALIB_EXT_REQ (1 << 1) +/* External request to perform a refresh command to a specific b ... */ +#define NB_DEBUG_DDRC_RANK_REFRESH_EXT_REQ_MASK 0x0000003C +#define NB_DEBUG_DDRC_RANK_REFRESH_EXT_REQ_SHIFT 2 + +/**** ddrc_phy_smode_control register ****/ +/* DDR PHY special mode */ +#define NB_DEBUG_DDRC_PHY_SMODE_CONTROL_CTL_MASK 0x0000FFFF +#define NB_DEBUG_DDRC_PHY_SMODE_CONTROL_CTL_SHIFT 0 + +/**** ddrc_phy_smode_status register ****/ +/* DDR PHY special mode */ +#define NB_DEBUG_DDRC_PHY_SMODE_STATUS_STT_MASK 0x0000FFFF +#define NB_DEBUG_DDRC_PHY_SMODE_STATUS_STT_SHIFT 0 + +/**** pmc register ****/ +/* Enable system control on NB DRO */ +#define NB_DEBUG_PMC_SYS_EN (1 << 0) +/* NB PMC HVT35 counter value */ +#define NB_DEBUG_PMC_HVT35_VAL_14_0_MASK 0x0000FFFE +#define NB_DEBUG_PMC_HVT35_VAL_14_0_SHIFT 1 +/* NB PMC SVT31 counter value */ +#define NB_DEBUG_PMC_SVT31_VAL_14_0_MASK 0x7FFF0000 +#define NB_DEBUG_PMC_SVT31_VAL_14_0_SHIFT 16 + +/**** cpus_int_out register ****/ +/* Defines which CPUs' IRQ will be triggered out through the cpu ... */ +#define NB_DEBUG_CPUS_INT_OUT_FIQ_EN_MASK 0x0000000F +#define NB_DEBUG_CPUS_INT_OUT_FIQ_EN_SHIFT 0 +/* Defines which CPUs' FIQ will be triggered out through the cpu ... */ +#define NB_DEBUG_CPUS_INT_OUT_IRQ_EN_MASK 0x000000F0 +#define NB_DEBUG_CPUS_INT_OUT_IRQ_EN_SHIFT 4 + +/**** track_dump_ctrl register ****/ +/* [24:16]: queue entry pointer[2] target queue: 1'b0: HazardTr ... 
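
/*
 * Sketch of using one of the 64 hardware semaphores per the Lockn
 * description above: a read returns 0 and atomically takes the lock
 * when it was free. Releasing by writing 0 back is an assumption; the
 * exact release protocol is not spelled out in the truncated comment.
 */
static inline int nb_sem_trylock(struct al_nb_regs __iomem *nb, int idx)
{
	return (al_reg_read32(&nb->semaphores[idx].lockn) &
		NB_SEMAPHORES_LOCKN_LOCK) == 0;
}

static inline void nb_sem_unlock(struct al_nb_regs __iomem *nb, int idx)
{
	al_reg_write32(&nb->semaphores[idx].lockn, 0);
}
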
*/ +#define NB_DEBUG_TRACK_DUMP_CTRL_PTR_MASK 0x7FFFFFFF +#define NB_DEBUG_TRACK_DUMP_CTRL_PTR_SHIFT 0 +/* Track Dump RequestIf set, queue entry info is latched on trac ... */ +#define NB_DEBUG_TRACK_DUMP_CTRL_REQ (1 << 31) + +/**** track_dump_rdata_0 register ****/ +/* valid */ +#define NB_DEBUG_TRACK_DUMP_RDATA_0_VALID (1 << 0) +/* low data */ +#define NB_DEBUG_TRACK_DUMP_RDATA_0_DATA_MASK 0xFFFFFFFE +#define NB_DEBUG_TRACK_DUMP_RDATA_0_DATA_SHIFT 1 + +/**** pos_track_dump_ctrl register ****/ +/* [24:16]: queue entry pointer */ +#define NB_DEBUG_POS_TRACK_DUMP_CTRL_PTR_MASK 0x7FFFFFFF +#define NB_DEBUG_POS_TRACK_DUMP_CTRL_PTR_SHIFT 0 +/* Track Dump RequestIf set, queue entry info is latched on trac ... */ +#define NB_DEBUG_POS_TRACK_DUMP_CTRL_REQ (1 << 31) + +/**** pos_track_dump_rdata_0 register ****/ +/* valid */ +#define NB_DEBUG_POS_TRACK_DUMP_RDATA_0_VALID (1 << 0) +/* low data */ +#define NB_DEBUG_POS_TRACK_DUMP_RDATA_0_DATA_MASK 0xFFFFFFFE +#define NB_DEBUG_POS_TRACK_DUMP_RDATA_0_DATA_SHIFT 1 + +/**** c2swb_track_dump_ctrl register ****/ +/* [24:16]: queue entry pointer */ +#define NB_DEBUG_C2SWB_TRACK_DUMP_CTRL_PTR_MASK 0x7FFFFFFF +#define NB_DEBUG_C2SWB_TRACK_DUMP_CTRL_PTR_SHIFT 0 +/* Track Dump RequestIf set, queue entry info is latched on trac ... */ +#define NB_DEBUG_C2SWB_TRACK_DUMP_CTRL_REQ (1 << 31) + +/**** c2swb_track_dump_rdata_0 register ****/ +/* valid */ +#define NB_DEBUG_C2SWB_TRACK_DUMP_RDATA_0_VALID (1 << 0) +/* low data */ +#define NB_DEBUG_C2SWB_TRACK_DUMP_RDATA_0_DATA_MASK 0xFFFFFFFE +#define NB_DEBUG_C2SWB_TRACK_DUMP_RDATA_0_DATA_SHIFT 1 + +/**** c2swb_bar_ovrd_high register ****/ +/* Read barrier is progress downstream when not terminated in th ... */ +#define NB_DEBUG_C2SWB_BAR_OVRD_HIGH_RD_ADDR_OVRD_EN (1 << 0) +/* address bits 39:32 */ +#define NB_DEBUG_C2SWB_BAR_OVRD_HIGH_ADDR_39_32_MASK 0x00FF0000 +#define NB_DEBUG_C2SWB_BAR_OVRD_HIGH_ADDR_39_32_SHIFT 16 + +/**** Config register ****/ +/* Individual processor control of the endianness configuration ... */ +#define NB_CPUN_CONFIG_STATUS_CONFIG_ENDIAN (1 << 0) +/* Individual processor control of the default exception handlin ... */ +#define NB_CPUN_CONFIG_STATUS_CONFIG_TE (1 << 1) +/* Individual processor control of the location of the exception ... */ +#define NB_CPUN_CONFIG_STATUS_CONFIG_VINITHI (1 << 2) +/* Individual processor control to disable write access to some ... */ +#define NB_CPUN_CONFIG_STATUS_CONFIG_CP15DISABLE (1 << 3) + +/**** Power_Ctrl register ****/ +/* Individual CPU power mode transition requestIf requested to e ... */ +#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_MASK 0x00000003 +#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_SHIFT 0 +/* Normal power mode state */ +#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_NORMAL \ + (0x0 << NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_SHIFT) +/* Dormant power mode state */ +#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_DEEP_IDLE \ + (0x2 << NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_SHIFT) +/* Powered-off power mode */ +#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_POWEREDOFF \ + (0x3 << NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_SHIFT) +/* Power down regret disableWhen power down regret is enabled, t ... */ +#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_PWRDN_RGRT_DIS (1 << 16) +/* Power down emulation enableIf set, the entire power down sequ ... */ +#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_PWRDN_EMULATE (1 << 17) +/* Disable wakeup from Local--GIC FIQ. 
*/ +#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_WU_LGIC_FIQ_DIS (1 << 18) +/* Disable wakeup from Local-GIC IRQ. */ +#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_WU_LGIC_IRQ_DIS (1 << 19) +/* Disable wakeup from IO-GIC FIQ. */ +#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_WU_IOGIC_FIQ_DIS (1 << 20) +/* Disable wakeup from IO-GIC IRQ. */ +#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_WU_IOGIC_IRQ_DIS (1 << 21) + +/**** Power_Status register ****/ +/* Read-only bits that reflect the individual CPU power mode sta ... */ +#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_MASK 0x00000003 +#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_SHIFT 0 +/* Normal power mode state */ +#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_NORMAL \ + (0x0 << NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_SHIFT) +/* Idle power mode state (WFI) */ +#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_IDLE \ + (0x1 << NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_SHIFT) +/* Dormant power mode state */ +#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_DEEP_IDLE \ + (0x2 << NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_SHIFT) +/* Powered-off power mode */ +#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_POWEREDOFF \ + (0x3 << NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_SHIFT) +/* WFI status */ +#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_WFI (1 << 2) +/* WFE status */ +#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_WFE (1 << 3) + +/**** PMU_Control register ****/ +/* Disable all countersWhen this bit is clear, counter state is ... */ +#define NB_MC_PMU_PMU_CONTROL_DISABLE_ALL (1 << 0) +/* Pause all counters */ +#define NB_MC_PMU_PMU_CONTROL_PAUSE_ALL (1 << 1) +/* Overflow interrupt enable. */ +#define NB_MC_PMU_PMU_CONTROL_OVRF_INTR_EN (1 << 2) +/* Number of monitored events supported by the PMU */ +#define NB_MC_PMU_PMU_CONTROL_NUM_OF_EVENTS_MASK 0x00F80000 +#define NB_MC_PMU_PMU_CONTROL_NUM_OF_EVENTS_SHIFT 19 +/* Number of counters impemented by PMU. */ +#define NB_MC_PMU_PMU_CONTROL_NUM_OF_CNTS_MASK 0x0F000000 +#define NB_MC_PMU_PMU_CONTROL_NUM_OF_CNTS_SHIFT 24 + +/**** Cfg register ****/ +/* Event select */ +#define NB_MC_PMU_COUNTERS_CFG_EVENT_SEL_MASK 0x0000003F +#define NB_MC_PMU_COUNTERS_CFG_EVENT_SEL_SHIFT 0 +/* Enable setting of counter low overflow status bit. */ +#define NB_MC_PMU_COUNTERS_CFG_OVRF_LOW_STT_EN (1 << 6) +/* Enable setting of counter high overflow status bit. */ +#define NB_MC_PMU_COUNTERS_CFG_OVRF_HIGH_STT_EN (1 << 7) +/* Enable pause on trigger in assertion. */ +#define NB_MC_PMU_COUNTERS_CFG_TRIGIN_PAUSE_EN (1 << 8) +/* Enable increment trigger out for trace */ +#define NB_MC_PMU_COUNTERS_CFG_TRIGOUT_EN (1 << 9) +/* Trigger out granule valueSpecifies the number of events count ... */ +#define NB_MC_PMU_COUNTERS_CFG_TRIGOUT_GRANULA_MASK 0x00007C00 +#define NB_MC_PMU_COUNTERS_CFG_TRIGOUT_GRANULA_SHIFT 10 +/* Pause on overflow bitmaskIf set for counter , current coun ... */ +#define NB_MC_PMU_COUNTERS_CFG_PAUSE_ON_OVRF_BITMASK_MASK 0x000F0000 +#define NB_MC_PMU_COUNTERS_CFG_PAUSE_ON_OVRF_BITMASK_SHIFT 16 + +/**** Cntl register ****/ +/* Set the counter state to disable, enable, or pause. */ +#define NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_MASK 0x00000003 +#define NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_SHIFT 0 +/* Disable counter. */ +#define NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_DISABLE \ + (0x0 << NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_SHIFT) +/* Enable counter. */ +#define NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_ENABLE \ + (0x1 << NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_SHIFT) +/* Pause counter. 
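
/*
 * Sketch of arming one memory-controller PMU counter: select an event,
 * then enable it through the state field. Counter index and event
 * number are illustrative; overflow handling is left out.
 */
static inline void nb_mc_pmu_counter_start(struct al_nb_regs __iomem *nb,
					   int cnt, unsigned int event)
{
	al_reg_write32_masked(&nb->mc_pmu_counters[cnt].cfg,
		NB_MC_PMU_COUNTERS_CFG_EVENT_SEL_MASK,
		event << NB_MC_PMU_COUNTERS_CFG_EVENT_SEL_SHIFT);
	al_reg_write32_masked(&nb->mc_pmu_counters[cnt].cntl,
		NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_MASK,
		NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_ENABLE);
}
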
*/
+#define NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_PAUSE \
+	(0x3 << NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_SHIFT)
+
+/**** High register ****/
+/* Counter high value */
+#define NB_MC_PMU_COUNTERS_HIGH_COUNTER_MASK 0x0000FFFF
+#define NB_MC_PMU_COUNTERS_HIGH_COUNTER_SHIFT 0
+
+/**** version register ****/
+/* Revision number (Minor) */
+#define NB_NB_VERSION_VERSION_RELEASE_NUM_MINOR_MASK 0x000000FF
+#define NB_NB_VERSION_VERSION_RELEASE_NUM_MINOR_SHIFT 0
+/* Revision number (Major) */
+#define NB_NB_VERSION_VERSION_RELEASE_NUM_MAJOR_MASK 0x0000FF00
+#define NB_NB_VERSION_VERSION_RELEASE_NUM_MAJOR_SHIFT 8
+/* Date of release */
+#define NB_NB_VERSION_VERSION_DATE_DAY_MASK 0x001F0000
+#define NB_NB_VERSION_VERSION_DATE_DAY_SHIFT 16
+/* Month of release */
+#define NB_NB_VERSION_VERSION_DATA_MONTH_MASK 0x01E00000
+#define NB_NB_VERSION_VERSION_DATA_MONTH_SHIFT 21
+/* Year of release (starting from 2000) */
+#define NB_NB_VERSION_VERSION_DATE_YEAR_MASK 0x3E000000
+#define NB_NB_VERSION_VERSION_DATE_YEAR_SHIFT 25
+/* Reserved */
+#define NB_NB_VERSION_VERSION_RESERVED_MASK 0xC0000000
+#define NB_NB_VERSION_VERSION_RESERVED_SHIFT 30
+
+/**** cpu_vmid register ****/
+/* target VMID */
+#define NB_SRIOV_CPU_VMID_VAL_MASK 0x000000FF
+#define NB_SRIOV_CPU_VMID_VAL_SHIFT 0
+
+/**** control register ****/
+/* Enable write accesses logging to FIFO instance 0. When an inbou ... */
+#define NB_PCIE_LOGGING_CONTROL_WR_EN (1 << 0)
+/* Enable read accesses logging. When an inbound read from PCIe hi ... */
+#define NB_PCIE_LOGGING_CONTROL_RD_EN (1 << 1)
+/* Enable write accesses logging to FIFO instance 1. When an inbo ... */
+#define NB_PCIE_LOGGING_CONTROL_WR_EN_1 (1 << 2)
+/* Enable read accesses logging to PCIe 1 and 2. When an inbound r ... */
+#define NB_PCIE_LOGGING_CONTROL_RD_EN_1 (1 << 3)
+/* Enable logging the original transaction strobes */
+#define NB_PCIE_LOGGING_CONTROL_STRB_EN (1 << 4)
+/* When this bit is set, read will always progress forward (will ... */
+#define NB_PCIE_LOGGING_CONTROL_FREE_RD_ON_WR_EMPTY_EN (1 << 5)
+/* Free stalled read whenever write fifo head pointer bit[31] is ... */
+#define NB_PCIE_LOGGING_CONTROL_FREE_RD_ON_WR_FIFO_PTR_UPD_EN (1 << 6)
+/* Push pended latched read notification to the current pushed w ... */
+#define NB_PCIE_LOGGING_CONTROL_WR_FIFO_PUSH_LATCH_RD_STATUS_EN (1 << 7)
+/* Read latch timeout enable */
+#define NB_PCIE_LOGGING_CONTROL_RD_TIMEOUT_EN (1 << 8)
+/* Logging window low */
+#define NB_PCIE_LOGGING_WR_WINDOW_LOW_LOW_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_WR_WINDOW_LOW_LOW_SHIFT 6
+
+/**** Wr_Window_High register ****/
+/* Window high address bits
+Supports 40-bit memory addressing */
+#define NB_PCIE_LOGGING_WR_WINDOW_HIGH_HIGH_MASK 0x000000FF
+#define NB_PCIE_LOGGING_WR_WINDOW_HIGH_HIGH_SHIFT 0
+/* Size mask. Corresponds to window low bits 31:6 */
+#define NB_PCIE_LOGGING_WR_WINDOW_SIZE_SIZE_MASK_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_WR_WINDOW_SIZE_SIZE_MASK_SHIFT 6
+/* FIFO base address.
+Must be aligned to 4KB */
+#define NB_PCIE_LOGGING_FIFO_BASE_ADDR_MASK 0xFFFFF000
+#define NB_PCIE_LOGGING_FIFO_BASE_ADDR_SHIFT 12
+
+/**** FIFO_Size register ****/
+/* FIFO size mask. Corresponds to FIFO base address bits 19:12 */
+#define NB_PCIE_LOGGING_FIFO_SIZE_MASK_19_12_MASK 0x000000FF
+#define NB_PCIE_LOGGING_FIFO_SIZE_MASK_19_12_SHIFT 0
+/* Logging window low */
+#define NB_PCIE_LOGGING_WR_WINDOW_LOW_1_LOW_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_WR_WINDOW_LOW_1_LOW_SHIFT 6
+
+/**** Wr_Window_High_1 register ****/
+/* Window high address bits
+Supports 40-bit memory addressing */
+#define NB_PCIE_LOGGING_WR_WINDOW_HIGH_1_HIGH_MASK 0x000000FF
+#define NB_PCIE_LOGGING_WR_WINDOW_HIGH_1_HIGH_SHIFT 0
+/* Size mask. Corresponds to window low bits 31:6 */
+#define NB_PCIE_LOGGING_WR_WINDOW_SIZE_1_SIZE_MASK_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_WR_WINDOW_SIZE_1_SIZE_MASK_SHIFT 6
+/* FIFO base address.
+Must be aligned to 4KB */
+#define NB_PCIE_LOGGING_FIFO_BASE_1_ADDR_MASK 0xFFFFF000
+#define NB_PCIE_LOGGING_FIFO_BASE_1_ADDR_SHIFT 12
+
+/**** FIFO_Size_1 register ****/
+/* FIFO size mask. Corresponds to FIFO base address bits 19:12 */
+#define NB_PCIE_LOGGING_FIFO_SIZE_1_MASK_19_12_MASK 0x000000FF
+#define NB_PCIE_LOGGING_FIFO_SIZE_1_MASK_19_12_SHIFT 0
+/* Logging window low */
+#define NB_PCIE_LOGGING_RD_WINDOW_LOW_LOW_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_RD_WINDOW_LOW_LOW_SHIFT 6
+
+/**** Rd_Window_High register ****/
+/* Window high address bits
+Supports 40-bit memory addressing */
+#define NB_PCIE_LOGGING_RD_WINDOW_HIGH_HIGH_MASK 0x000000FF
+#define NB_PCIE_LOGGING_RD_WINDOW_HIGH_HIGH_SHIFT 0
+/* Size mask. Corresponds to window low bits 31:6 */
+#define NB_PCIE_LOGGING_RD_WINDOW_SIZE_SIZE_MASK_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_RD_WINDOW_SIZE_SIZE_MASK_SHIFT 6
+
+/**** Read_Latch register ****/
+/* Set by hardware when the read address is stalled and latched ... */
+#define NB_PCIE_LOGGING_READ_LATCH_VALID (1 << 0)
+/* Latched read address [30:0] */
+#define NB_PCIE_LOGGING_READ_LATCH_ADDR_MASK 0xFFFFFFFE
+#define NB_PCIE_LOGGING_READ_LATCH_ADDR_SHIFT 1
+/* Logging window low */
+#define NB_PCIE_LOGGING_RD_WINDOW_LOW_1_LOW_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_RD_WINDOW_LOW_1_LOW_SHIFT 6
+
+/**** Rd_Window_High_1 register ****/
+/* Window high address bits
+Supports 40-bit memory addressing */
+#define NB_PCIE_LOGGING_RD_WINDOW_HIGH_1_HIGH_MASK 0x000000FF
+#define NB_PCIE_LOGGING_RD_WINDOW_HIGH_1_HIGH_SHIFT 0
+/* Size mask. Corresponds to window low bits 31:6 */
+#define NB_PCIE_LOGGING_RD_WINDOW_SIZE_1_SIZE_MASK_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_RD_WINDOW_SIZE_1_SIZE_MASK_SHIFT 6
+
+/**** Read_Latch_1 register ****/
+/* Set by hardware when the read address is stalled and latched ... */
+#define NB_PCIE_LOGGING_READ_LATCH_1_VALID (1 << 0)
+/* Latched read address [30:0] */
+#define NB_PCIE_LOGGING_READ_LATCH_1_ADDR_MASK 0xFFFFFFFE
+#define NB_PCIE_LOGGING_READ_LATCH_1_ADDR_SHIFT 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_NB_REGS_H__ */
+
+/** @} end of ... group */
+
+
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie.c b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie.c
new file mode 100644
index 00000000000000..efd03f3306fc43
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie.c
@@ -0,0 +1,1415 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+ +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +#include +#include +#include +#include "al_hal_pcie_regs.h" + +/* --->>> Parameters definitions <<<--- */ +#define AL_PCIE_REV_ID_0 0 +#define AL_PCIE_REV_ID_1 1 + +#define AL_PCIE_AXI_REGS_OFFSET 0x0 +#define AL_PCIE_APP_REGS_OFFSET 0x1000 +#define AL_PCIE_CORE_CONF_BASE_OFFSET 0x2000 + +#define AL_PCIE_LTSSM_STATE_L0 0x11 +#define AL_PCIE_LTSSM_STATE_L0S 0x12 +#define AL_PCIE_DEVCTL_PAYLOAD_128B 0x00 +#define AL_PCIE_DEVCTL_PAYLOAD_256B 0x20 + +#define AL_PCIE_SECBUS_DEFAULT 0x1 +#define AL_PCIE_SUBBUS_DEFAULT 0x1 +#define AL_PCIE_LINKUP_WAIT_INTERVAL 50 /* measured in usec */ +#define AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC 20 + +#define AL_PCIE_LINKUP_RETRIES 8 + +#define AL_PCIE_MAX_32_MEMORY_BAR_SIZE (0x100000000ULL) +#define AL_PCIE_MIN_MEMORY_BAR_SIZE (1 << 12) +#define AL_PCIE_MIN_IO_BAR_SIZE (1 << 8) + + +/* --->>> MACROS <<<--- */ +#define AL_PCIE_PARSE_LANES(v) (((1 << v) - 1) << \ + PCIE_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT) + +/* --->>> static functions <<<--- */ + +static void +al_pcie_port_enable_wr_to_rd_only(struct al_pcie_port *pcie_port) +{ + if (pcie_port->write_to_read_only_enabled == AL_TRUE) + return; + al_dbg("PCIe %d: Enable write to Read Only fields\n", pcie_port->port_id); + al_reg_write32(&pcie_port->regs->core_space.port_regs.rd_only_wr_en, 1); + + pcie_port->write_to_read_only_enabled = AL_TRUE; +} + +/** helper function to access dbi_cs2 registers */ +void al_reg_write32_dbi_cs2(uint32_t * offset, uint32_t val) +{ + al_reg_write32(offset + (0x1000 >> 2),val); +} + +int al_pcie_port_max_lanes_set(struct al_pcie_port *pcie_port, uint8_t lanes) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + /* convert to bitmask format (4 ->'b1111, 2 ->'b11, 1 -> 'b1) */ + uint32_t active_lanes_val = AL_PCIE_PARSE_LANES(lanes); + + al_reg_write32_masked(®s->axi.pcie_global.conf, + 
PCIE_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK, + active_lanes_val); + + pcie_port->max_lanes = lanes; + return 0; +} + +void al_pcie_port_memory_shutdown_set( + struct al_pcie_port *pcie_port, + al_bool enable) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + al_reg_write32_masked(®s->axi.pcie_global.conf, + PCIE_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN, + enable == AL_TRUE ? + PCIE_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN : 0); +} + +static unsigned int al_pcie_speed_gen_code(enum al_pcie_link_speed speed) +{ + if (speed == AL_PCIE_LINK_SPEED_GEN1) + return 1; + if (speed == AL_PCIE_LINK_SPEED_GEN2) + return 2; + if (speed == AL_PCIE_LINK_SPEED_GEN3) + return 3; + /* must not be reached */ + return 0; +} + +static int +al_pcie_port_link_config(struct al_pcie_port *pcie_port, + struct al_pcie_link_params *link_params) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + al_dbg("PCIe %d: link config: max speed gen %d, max lanes %d, reversal %s\n", + pcie_port->port_id, link_params->max_speed, + pcie_port->max_lanes, link_params->enable_reversal? "enable" : "disable"); + + al_pcie_port_enable_wr_to_rd_only(pcie_port); + + if (link_params->max_speed != AL_PCIE_LINK_SPEED_DEFAULT) { + uint16_t max_speed_val = (uint16_t)al_pcie_speed_gen_code(link_params->max_speed); + al_reg_write32_masked((uint32_t __iomem *)(®s->core_space.pcie_link_cap_base), + 0xF, max_speed_val); + al_reg_write32_masked((uint32_t __iomem *)(®s->core_space.pcie_cap_base + (AL_PCI_EXP_LNKCTL2 >> 2)), + 0xF, max_speed_val); + } + + /* TODO: add support for reversal mode */ + if (link_params->enable_reversal) { + al_err("PCIe %d: enabling reversal mode not implemented\n", + pcie_port->port_id); + return -ENOSYS; + } + return 0; +} + +static void al_pcie_port_ram_parity_int_config( + struct al_pcie_port *pcie_port, + al_bool enable) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + al_reg_write32(®s->app.parity.en_core, + (enable == AL_TRUE) ? 0xffffffff : 0x0); +} + +static void al_pcie_port_axi_parity_int_config( + struct al_pcie_port *pcie_port, + al_bool enable) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + al_reg_write32(®s->axi.parity.en_axi, + (enable == AL_TRUE) ? 0xffffffff : 0x0); +} + +static int +al_pcie_port_lat_rply_timers_config(struct al_pcie_port * pcie_port, + struct al_pcie_latency_replay_timers *lat_rply_timers) +{ + struct al_pcie_regs *regs = pcie_port->regs; + uint32_t reg = 0; + + AL_REG_FIELD_SET(reg, 0xFFFF, 0, lat_rply_timers->round_trip_lat_limit); + AL_REG_FIELD_SET(reg, 0xFFFF0000, 16, lat_rply_timers->replay_timer_limit); + + al_reg_write32(®s->core_space.port_regs.ack_lat_rply_timer, reg); + return 0; +} + +int +al_pcie_port_snoop_config(struct al_pcie_port *pcie_port, al_bool enable_axi_snoop) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + /* Set snoop mode */ + al_info("PCIE_%d: snoop mode %s\n", + pcie_port->port_id, enable_axi_snoop ? 
"enable" : "disable"); + + if (enable_axi_snoop) { + al_reg_write32_masked(®s->axi.ctrl.master_arctl, + PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP, + PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP); + + al_reg_write32_masked(®s->axi.ctrl.master_awctl, + PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP, + PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP); + } else { + al_reg_write32_masked(®s->axi.ctrl.master_arctl, + PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP, + PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP); + + al_reg_write32_masked(®s->axi.ctrl.master_awctl, + PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP, + PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP); + } + return 0; +} + +static int +al_pcie_port_gen2_params_config(struct al_pcie_port *pcie_port, + struct al_pcie_gen2_params *gen2_params) +{ + struct al_pcie_regs *regs = pcie_port->regs; + uint32_t gen2_ctrl; + + al_dbg("PCIe %d: Gen2 params config: Tx Swing %s, interrupt on link Eq %s, set Deemphasis %s\n", + pcie_port->port_id, + gen2_params->tx_swing_low ? "Low" : "Full", + gen2_params->tx_compliance_receive_enable? "enable" : "disable", + gen2_params->set_deemphasis? "enable" : "disable"); + + gen2_ctrl = al_reg_read32(®s->core_space.port_regs.gen2_ctrl); + + if (gen2_params->tx_swing_low) + AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT); + else + AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT); + + if (gen2_params->tx_compliance_receive_enable) + AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT); + else + AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT); + + if (gen2_params->set_deemphasis) + AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT); + else + AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT); + + al_reg_write32(®s->core_space.port_regs.gen2_ctrl, gen2_ctrl); + + return 0; +} + + +static uint16_t +gen3_lane_eq_param_to_val(struct al_pcie_gen3_lane_eq_params *eq_params) +{ + uint16_t eq_control = 0; + + eq_control = eq_params->downstream_port_transmitter_preset & 0xF; + eq_control |= (eq_params->downstream_port_receiver_preset_hint & 0x7) << 4; + eq_control |= (eq_params->upstream_port_transmitter_preset & 0xF) << 8; + eq_control |= (eq_params->upstream_port_receiver_preset_hint & 0x7) << 12; + + return eq_control; +} + +static int +al_pcie_port_gen3_params_config(struct al_pcie_port *pcie_port, + struct al_pcie_gen3_params *gen3_params) +{ + struct al_pcie_regs *regs = pcie_port->regs; + uint32_t reg = 0; + uint16_t __iomem *lanes_eq_base = (uint16_t __iomem *)(®s->core_space.pcie_sec_ext_cap_base + (0xC >> 2)); + int i; + + al_dbg("PCIe %d: Gen3 params config: Equalization %s, interrupt on link Eq %s\n", + pcie_port->port_id, + gen3_params->perform_eq ? "enable" : "disable", + gen3_params->interrupt_enable_on_link_eq_request? 
"enable" : "disable"); + + if (gen3_params->perform_eq) + AL_REG_BIT_SET(reg, 0); + if (gen3_params->interrupt_enable_on_link_eq_request) + AL_REG_BIT_SET(reg, 1); + + al_reg_write32(®s->core_space.pcie_sec_ext_cap_base + (4 >> 2), + reg); + + + + for (i = 0; i < gen3_params->eq_params_elements; i += 2) { + uint32_t eq_control = + (uint32_t)gen3_lane_eq_param_to_val(gen3_params->eq_params + i) | + (uint32_t)gen3_lane_eq_param_to_val(gen3_params->eq_params + i + 1) << 16; + + al_dbg("PCIe %d: Set EQ (0x%08x) for lane %d, %d\n", pcie_port->port_id, eq_control, i, i + 1); + al_reg_write32((uint32_t *)(lanes_eq_base + i), eq_control); + } + reg = al_reg_read32(®s->core_space.port_regs.gen3_ctrl); + if (gen3_params->eq_disable) + AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT); + else + AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT); + + if (gen3_params->eq_phase2_3_disable) + AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT); + else + AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT); + + al_reg_write32(®s->core_space.port_regs.gen3_ctrl, reg); + + reg = 0; + AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_LF_MASK, + PCIE_PORT_GEN3_EQ_LF_SHIFT, + gen3_params->local_lf); + AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_FS_MASK, + PCIE_PORT_GEN3_EQ_FS_SHIFT, + gen3_params->local_fs); + + al_reg_write32(®s->core_space.port_regs.gen3_eq_fs_lf, reg); + + reg = 0; + AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_MASK, + PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_SHIFT, + gen3_params->local_lf); + AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_MASK, + PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_SHIFT, + gen3_params->local_fs); + al_reg_write32(®s->axi.conf.zero_lane0, reg); + al_reg_write32(®s->axi.conf.zero_lane1, reg); + al_reg_write32(®s->axi.conf.zero_lane2, reg); + al_reg_write32(®s->axi.conf.zero_lane3, reg); + + /* + * Gen3 EQ Control Register: + * - Preset Request Vector - request 3-5 + * - Behavior After 24 ms Timeout (when optimal settings are not + * found): Recovery.Equalization.RcvrLock + * - Phase2_3 2 ms Timeout Disable + * - Feedback Mode - Figure Of Merit + */ + reg = 0x00001831; + al_reg_write32(®s->core_space.port_regs.gen3_eq_ctrl, reg); + + return 0; +} + +static int +al_pcie_port_tl_credits_config(struct al_pcie_port *pcie_port, + struct al_pcie_tl_credits_params *tl_credits __attribute__((__unused__))) +{ + al_err("PCIe %d: transport layer credits config not implemented\n", + pcie_port->port_id); + + return -ENOSYS; + +} + +static int +al_pcie_port_ep_params_config(struct al_pcie_port *pcie_port, + struct al_pcie_ep_params *ep_params) +{ + struct al_pcie_regs *regs = pcie_port->regs; + int bar_idx; + + al_pcie_port_enable_wr_to_rd_only(pcie_port); + + /* Disable D1 and D3hot capabilities */ + if (ep_params->cap_d1_d3hot_dis) + al_reg_write32_masked( + ®s->core_space.pcie_pm_cap_base, + AL_FIELD_MASK(26, 25) | AL_FIELD_MASK(31, 28), 0); + + /* Disable FLR capability */ + if (ep_params->cap_flr_dis) + al_reg_write32_masked( + ®s->core_space.pcie_dev_cap_base, + AL_BIT(28), 0); + + /* Disable ASPM capability */ + if (ep_params->cap_aspm_dis) { + al_reg_write32_masked( + ®s->core_space.pcie_cap_base + (AL_PCI_EXP_LNKCAP >> 2), + AL_PCI_EXP_LNKCAP_ASPMS, 0); + } else if (pcie_port->rev_id == AL_PCIE_REV_ID_0) { + al_warn("%s: ASPM support is enabled, please disable it\n", + __func__); + return -EINVAL; + } + + /* Enable relaxed PCIe ordering: Disable read completion after write + * ordering. 
+	 */
+	if (ep_params->relaxed_pcie_ordering)
+		al_reg_write32_masked(
+			&regs->axi.ordering.pos_cntl,
+			AL_BIT(5) | AL_BIT(10),
+			AL_BIT(5) | AL_BIT(10));
+
+	if (!ep_params->bar_params_valid)
+		return 0;
+
+	for (bar_idx = 0; bar_idx < 6;) { /* bar_idx is incremented depending on BAR type */
+		struct al_pcie_ep_bar_params *params = ep_params->bar_params + bar_idx;
+		uint32_t mask = 0;
+		uint32_t ctrl = 0;
+		uint32_t __iomem *bar_addr = &regs->core_space.config_header[(AL_PCI_BASE_ADDRESS_0 >> 2) + bar_idx];
+
+		if (params->enable) {
+			uint64_t size = params->size;
+
+			if (params->memory_64_bit) {
+				struct al_pcie_ep_bar_params *next_params = params + 1;
+				/* 64-bit BARs start at an even index (BAR0, BAR2 or BAR4) */
+				if (bar_idx & 1)
+					return -EINVAL;
+
+				/* next BAR must be disabled */
+				if (next_params->enable)
+					return -EINVAL;
+
+				/* a 64-bit BAR must be a memory BAR */
+				if (!params->memory_space)
+					return -EINVAL;
+			} else {
+				if (size > AL_PCIE_MAX_32_MEMORY_BAR_SIZE)
+					return -EINVAL;
+				/* 32 bit space can't be prefetchable */
+				if (params->memory_is_prefetchable)
+					return -EINVAL;
+			}
+
+			if (params->memory_space) {
+				if (size < AL_PCIE_MIN_MEMORY_BAR_SIZE) {
+					al_err("PCIe %d: memory BAR %d: size (0x%llx) less than the minimal allowed value\n",
+						pcie_port->port_id, bar_idx, size);
+					return -EINVAL;
+				}
+			} else {
+				/* IO can't be prefetchable */
+				if (params->memory_is_prefetchable)
+					return -EINVAL;
+
+				if (size < AL_PCIE_MIN_IO_BAR_SIZE) {
+					al_err("PCIe %d: IO BAR %d: size (0x%llx) less than the minimal allowed value\n",
+						pcie_port->port_id, bar_idx, size);
+					return -EINVAL;
+				}
+			}
+
+			/* size must be power of 2 */
+			if (size & (size - 1)) {
+				al_err("PCIe %d: BAR %d: size (0x%llx) must be "
+					"power of 2\n",
+					pcie_port->port_id, bar_idx, size);
+				return -EINVAL;
+			}
+
+			/* If BAR is 64-bit, disable the next BAR before
+			 * configuring this one
+			 */
+			if (params->memory_64_bit)
+				al_reg_write32_dbi_cs2(bar_addr + 1, 0);
+
+			mask = 1; /* enable bit */
+			mask |= (params->size - 1) & 0xFFFFFFFF;
+
+			al_reg_write32_dbi_cs2(bar_addr, mask);
+
+			if (params->memory_space == AL_FALSE)
+				ctrl = AL_PCI_BASE_ADDRESS_SPACE_IO;
+			if (params->memory_64_bit)
+				ctrl |= AL_PCI_BASE_ADDRESS_MEM_TYPE_64;
+			if (params->memory_is_prefetchable)
+				ctrl |= AL_PCI_BASE_ADDRESS_MEM_PREFETCH;
+			al_reg_write32(bar_addr, ctrl);
+
+			if (params->memory_64_bit) {
+				mask = ((params->size - 1) >> 32) & 0xFFFFFFFF;
+				al_reg_write32_dbi_cs2(bar_addr + 1, mask);
+			}
+
+		} else {
+			al_reg_write32_dbi_cs2(bar_addr, mask);
+		}
+		if (params->enable && params->memory_64_bit)
+			bar_idx += 2;
+		else
+			bar_idx += 1;
+	}
+	if (ep_params->exp_bar_params.enable) {
+		al_err("PCIe %d: Expansion BAR enable not supported\n", pcie_port->port_id);
+		return -ENOSYS;
+	}
+
+	/* Open CPU-generated MSI and legacy interrupts in the PCIe wrapper logic */
+	al_reg_write32(&regs->app.soc_int.mask_inta_leg_0, (1 << 21));
+
+	/**
+	 * Addressing RMN: 1547
+	 *
+	 * RMN description:
+	 * 1. Whenever writing to 0x2xx offset, the write also happens to
+	 * 0x3xx address, meaning two registers are written instead of one.
+	 * 2. Read and write from 0x3xx work ok.
+	 *
+	 * Software flow:
+	 * Backup the value of the app.int_grp_a.mask_a register, because
+	 * app.int_grp_a.mask_clear_a gets overwritten during the write to
+	 * app.soc.mask_msi_leg_0 register.
+	 * Restore the original value after the write to app.soc.mask_msi_leg_0
+	 * register.
+ */ + if (pcie_port->rev_id == AL_PCIE_REV_ID_0) { + uint32_t backup; + + backup = al_reg_read32(®s->app.int_grp_a_m0.mask_a); + al_reg_write32(®s->app.soc_int.mask_msi_leg_0, (1 << 22)); + al_reg_write32(®s->app.int_grp_a_m0.mask_a, backup); + } else + al_reg_write32(®s->app.soc_int.mask_msi_leg_0, (1 << 22)); + + return 0; +} + +static void +al_pcie_port_features_config(struct al_pcie_port *pcie_port, + struct al_pcie_features *features) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + al_assert(pcie_port->rev_id > AL_PCIE_REV_ID_0); + + al_reg_write32_masked( + ®s->app.ctrl_gen.features, + PCIE_W_CTRL_GEN_FEATURES_SATA_EP_MSI_FIX, + features->sata_ep_msi_fix ? + PCIE_W_CTRL_GEN_FEATURES_SATA_EP_MSI_FIX : 0); +} + +static void +al_pcie_port_ib_hcrd_config(struct al_pcie_port *pcie_port) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + al_reg_write32_masked( + ®s->core_space.port_regs.vc0_posted_rcv_q_ctrl, + RADM_PQ_HCRD_VC0_MASK, + (pcie_port->nof_p_hdr - 1) << RADM_PQ_HCRD_VC0_SHIFT); + + al_reg_write32_masked( + ®s->core_space.port_regs.vc0_non_posted_rcv_q_ctrl, + RADM_NPQ_HCRD_VC0_MASK, + (pcie_port->nof_np_hdr - 1) << RADM_NPQ_HCRD_VC0_SHIFT); +} + +enum al_pcie_function_mode al_pcie_function_mode_get( + struct al_pcie_port *pcie_port) +{ + struct al_pcie_regs *regs = pcie_port->regs; + uint32_t reg, device_type; + + al_assert(pcie_port); + + reg = al_reg_read32(®s->axi.pcie_global.conf); + + device_type = AL_REG_FIELD_GET(reg, + PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK, + PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT); + + switch (device_type) { + case PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP: + return AL_PCIE_FUNCTION_MODE_EP; + case PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC: + return AL_PCIE_FUNCTION_MODE_RC; + default: + al_err("PCIe %d: unknown device type (%d) in global conf " + "register.\n", + pcie_port->port_id, device_type); + } + return AL_PCIE_FUNCTION_MODE_UNKNOWN; +} + +static void +al_pcie_port_ep_iov_setup( + struct al_pcie_port *pcie_port, + struct al_pcie_ep_iov_params *ep_iov_params) +{ + struct al_pcie_regs *regs = pcie_port->regs; + enum al_pcie_function_mode func_mode = + al_pcie_function_mode_get(pcie_port); + + al_assert(func_mode == AL_PCIE_FUNCTION_MODE_EP); + + al_reg_write32_masked( + ®s->axi.pre_configuration.pcie_core_setup, + PCIE_AXI_CORE_SETUP_SRIOV_ENABLE, + ((ep_iov_params->sriov_vfunc_en == AL_TRUE) ? + PCIE_AXI_CORE_SETUP_SRIOV_ENABLE : 0)); + + al_reg_write32_masked(®s->app.cfg_elbi.emulation, + PCIE_W_CFG_EMUL_CTRL_FIX_CLIENT1_FMT_EN, + ((ep_iov_params->support_32b_address_in_iov == AL_TRUE) ? + PCIE_W_CFG_EMUL_CTRL_FIX_CLIENT1_FMT_EN : 0)); +} + + +/******************** link operations ***************************************/ + +/** return AL_TRUE if link is up, AL_FALSE otherwise */ +static al_bool al_pcie_check_link(struct al_pcie_port *pcie_port, + uint8_t *ltssm_ret) +{ + struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs; + uint32_t info_0; + uint8_t ltssm_state; + + info_0 = al_reg_read32(®s->app.debug.info_0); + + ltssm_state = AL_REG_FIELD_GET(info_0, + PCIE_W_DEBUG_INFO_0_LTSSM_STATE_MASK, + PCIE_W_DEBUG_INFO_0_LTSSM_STATE_SHIFT); + + al_dbg("PCIe %d: Port Debug 0: 0x%08x. 
LTSSM state :0x%x\n", + pcie_port->port_id, info_0, ltssm_state); + + if (ltssm_ret) + *ltssm_ret = ltssm_state; + + if ((ltssm_state == AL_PCIE_LTSSM_STATE_L0) || + (ltssm_state == AL_PCIE_LTSSM_STATE_L0S)) + return AL_TRUE; + return AL_FALSE; +} + +/******************************* API functions ********************************/ +/** Enable PCIe port (deassert reset) */ +int al_pcie_port_enable( + struct al_pcie_port *pcie_port, + void __iomem *pbs_reg_base) +{ + struct al_pbs_regs *pbs_regs = (struct al_pbs_regs *)pbs_reg_base; + struct al_pcie_regs *regs = pcie_port->regs; + unsigned int port_id = pcie_port->port_id; + + /* + * Disable ATS capability + * - must be done before core reset deasserted + * - rev_id 0 - no effect, but no harm + */ + al_reg_write32_masked( + ®s->axi.ordering.pos_cntl, + PCIE_AXI_CORE_SETUP_ATS_CAP_DIS, + PCIE_AXI_CORE_SETUP_ATS_CAP_DIS); + + /* Deassert core reset */ + al_reg_write32_masked( + &pbs_regs->unit.pcie_conf_1, + 1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT), + 1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT)); + + return 0; +} + +/** Disable PCIe port (assert reset) */ +void al_pcie_port_disable( + struct al_pcie_port *pcie_port, + void __iomem *pbs_reg_base) +{ + struct al_pbs_regs *pbs_regs = (struct al_pbs_regs *)pbs_reg_base; + unsigned int port_id = pcie_port->port_id; + + /* Assert core reset */ + al_reg_write32_masked( + &pbs_regs->unit.pcie_conf_1, + 1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT), + 0); +} + +/** Initializes a PCIe handle structure. */ +int al_pcie_handle_init(struct al_pcie_port *pcie_port, + void __iomem *pcie_reg_base, + unsigned int port_id) +{ + pcie_port->regs = pcie_reg_base; + pcie_port->port_id = port_id; + pcie_port->write_to_read_only_enabled = AL_FALSE; + pcie_port->max_lanes = 0; + pcie_port->ib_hcrd_config_required = AL_FALSE; + + al_dbg("pcie port handle initialized. port id: %d. regs base %p\n", + port_id, pcie_reg_base); + return 0; +} + +/** configure function mode (root complex or endpoint) */ +int +al_pcie_port_func_mode_config(struct al_pcie_port *pcie_port, + enum al_pcie_function_mode mode) +{ + struct al_pcie_regs *regs = pcie_port->regs; + uint32_t reg, device_type, new_device_type; + + reg = al_reg_read32(®s->axi.pcie_global.conf); + + device_type = AL_REG_FIELD_GET(reg, + PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK, + PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT); + if (mode == AL_PCIE_FUNCTION_MODE_EP) + new_device_type = PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP; + else + new_device_type = PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC; + + if (new_device_type == device_type) { + al_dbg("PCIe %d: function mode already set to %s", + pcie_port->port_id, (mode == AL_PCIE_FUNCTION_MODE_EP) ? + "EndPoint" : "Root Complex"); + return 0; + } + al_info("PCIe %d: set function mode to %s\n", + pcie_port->port_id, (mode == AL_PCIE_FUNCTION_MODE_EP) ? 
+ "EndPoint" : "Root Complex"); + AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK, + PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT, + new_device_type); + + al_reg_write32(®s->axi.pcie_global.conf, reg); + + return 0; +} + +/* Inbound header credits and outstanding outbound reads configuration */ +void al_pcie_port_ib_hcrd_os_ob_reads_config( + struct al_pcie_port *pcie_port, + struct al_pcie_ib_hcrd_os_ob_reads_config *ib_hcrd_os_ob_reads_config) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + al_assert(ib_hcrd_os_ob_reads_config->nof_np_hdr > 0); + + al_assert(ib_hcrd_os_ob_reads_config->nof_p_hdr > 0); + + al_assert(ib_hcrd_os_ob_reads_config->nof_cpl_hdr > 0); + + al_assert( + (ib_hcrd_os_ob_reads_config->nof_cpl_hdr + + ib_hcrd_os_ob_reads_config->nof_np_hdr + + ib_hcrd_os_ob_reads_config->nof_p_hdr) == AL_PCIE_IB_HCRD_SUM); + + al_assert( + (ib_hcrd_os_ob_reads_config->nof_outstanding_ob_reads * + (unsigned int)AL_PCIE_NOF_CPL_HDR_NOF_OS_OB_READS_MIN_RATIO) <= + ib_hcrd_os_ob_reads_config->nof_cpl_hdr); + + al_assert( + ib_hcrd_os_ob_reads_config->nof_p_hdr <= + AL_PCIE_NOF_P_NP_HDR_MAX); + + al_assert( + ib_hcrd_os_ob_reads_config->nof_np_hdr <= + AL_PCIE_NOF_P_NP_HDR_MAX); + + al_reg_write32_masked( + ®s->axi.init_fc.cfg, + PCIE_AXI_INIT_FC_CFG_NOF_P_HDR_MASK | + PCIE_AXI_INIT_FC_CFG_NOF_NP_HDR_MASK | + PCIE_AXI_INIT_FC_CFG_NOF_CPL_HDR_MASK, + (ib_hcrd_os_ob_reads_config->nof_p_hdr << + PCIE_AXI_INIT_FC_CFG_NOF_P_HDR_SHIFT) | + (ib_hcrd_os_ob_reads_config->nof_np_hdr << + PCIE_AXI_INIT_FC_CFG_NOF_NP_HDR_SHIFT) | + (ib_hcrd_os_ob_reads_config->nof_cpl_hdr << + PCIE_AXI_INIT_FC_CFG_NOF_CPL_HDR_SHIFT)); + + al_reg_write32_masked( + ®s->axi.pre_configuration.pcie_core_setup, + PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_MASK, + ib_hcrd_os_ob_reads_config->nof_outstanding_ob_reads << + PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_SHIFT); + + /* Store 'nof_p_hdr' and 'nof_np_hdr' to be set in the core later */ + pcie_port->nof_np_hdr = ib_hcrd_os_ob_reads_config->nof_np_hdr; + pcie_port->nof_p_hdr = ib_hcrd_os_ob_reads_config->nof_p_hdr; + pcie_port->ib_hcrd_config_required = AL_TRUE; +} + +/*TODO: move those defines */ +/** return current function mode (root complex or endpoint) */ +enum al_pcie_function_mode +al_pcie_function_type_get(struct al_pcie_port *pcie_port) +{ + return al_pcie_function_mode_get(pcie_port); +} + +/** configure pcie port (link params, etc..) 
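+ *
+ * Reads the port revision ID, resolves the per-revision interrupt group
+ * register bases, and then applies each parameter block that is set in
+ * 'params' (link, snoop, parity interrupts, latency/replay timers,
+ * Gen2/Gen3, TL credits, endpoint parameters, features, SR-IOV).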
+ */
+int al_pcie_port_config(struct al_pcie_port *pcie_port,
+			struct al_pcie_config_params *params)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	int status = 0;
+
+	al_assert(pcie_port);
+	al_assert(params);
+
+	al_dbg("PCIe %d: port config\n", pcie_port->port_id);
+
+	/* Read revision ID */
+	pcie_port->rev_id = al_reg_read32(
+		(uint32_t __iomem *)(&regs->core_space.config_header[0] +
+		(PCI_CLASS_REVISION >> 2))) & 0xff;
+
+	if (pcie_port->rev_id == AL_PCIE_REV_ID_0) {
+		pcie_port->app_int_grp_a_base =
+			(uint32_t __iomem *)&regs->app.int_grp_a_m0;
+		pcie_port->app_int_grp_b_base =
+			(uint32_t __iomem *)&regs->app.int_grp_b_m0;
+	} else {
+		pcie_port->app_int_grp_a_base =
+			(uint32_t __iomem *)&regs->app.int_grp_a;
+		pcie_port->app_int_grp_b_base =
+			(uint32_t __iomem *)&regs->app.int_grp_b;
+	}
+
+	pcie_port->axi_int_grp_a_base =
+		(uint32_t __iomem *)&regs->axi.int_grp_a;
+
+	/* if max lanes not specified, read it from the register */
+	if (pcie_port->max_lanes == 0) {
+		uint32_t global_conf = al_reg_read32(&regs->axi.pcie_global.conf);
+		pcie_port->max_lanes = AL_REG_FIELD_GET(global_conf,
+				PCIE_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK,
+				PCIE_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT);
+	}
+
+	if (params->link_params)
+		status = al_pcie_port_link_config(pcie_port, params->link_params);
+	if (status)
+		goto done;
+
+	status = al_pcie_port_snoop_config(pcie_port, params->enable_axi_snoop);
+	if (status)
+		goto done;
+
+	al_pcie_port_ram_parity_int_config(pcie_port, params->enable_ram_parity_int);
+
+	al_pcie_port_axi_parity_int_config(pcie_port, params->enable_axi_parity_int);
+
+	if (params->lat_rply_timers)
+		status = al_pcie_port_lat_rply_timers_config(pcie_port, params->lat_rply_timers);
+	if (status)
+		goto done;
+
+	if (params->gen2_params)
+		status = al_pcie_port_gen2_params_config(pcie_port, params->gen2_params);
+	if (status)
+		goto done;
+
+	if (params->gen3_params)
+		status = al_pcie_port_gen3_params_config(pcie_port, params->gen3_params);
+	if (status)
+		goto done;
+
+	if (params->tl_credits)
+		status = al_pcie_port_tl_credits_config(pcie_port, params->tl_credits);
+	if (status)
+		goto done;
+
+	if (params->ep_params)
+		status = al_pcie_port_ep_params_config(pcie_port, params->ep_params);
+	if (status)
+		goto done;
+
+	if (params->features)
+		al_pcie_port_features_config(pcie_port, params->features);
+
+	if (pcie_port->ib_hcrd_config_required == AL_TRUE)
+		al_pcie_port_ib_hcrd_config(pcie_port);
+
+	if (params->ep_iov_params)
+		al_pcie_port_ep_iov_setup(pcie_port, params->ep_iov_params);
+
+	if (params->fast_link_mode) {
+		al_reg_write32_masked(&regs->core_space.port_regs.port_link_ctrl,
+			      1 << PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT,
+			      1 << PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT);
+	}
+
+	if (params->enable_axi_slave_err_resp)
+		al_reg_write32_masked(&regs->core_space.port_regs.axi_slave_err_resp,
+			      1 << PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT,
+			      1 << PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT);
+
+	/* enable memory and I/O access from port when in RC mode */
+	if (params->function_mode == AL_PCIE_FUNCTION_MODE_RC) {
+		al_reg_write16_masked((uint16_t __iomem *)(&regs->core_space.config_header[0] + (0x4 >> 2)),
+			      0x7, /* Mem, MSE, IO */
+			      0x7);
+		/* change the class code to match a PCI bridge */
+		al_reg_write32_masked((uint32_t __iomem *)(&regs->core_space.config_header[0] + (PCI_CLASS_REVISION >> 2)),
+			      0xFFFFFF00,
+			      0x06040000);
+	}
+done:
+	al_dbg("PCIe %d: port config %s\n", pcie_port->port_id, status ?
"failed": "done"); + + return status; +} + +/* Enable/disable deferring incoming configuration requests */ +void al_pcie_app_req_retry_set( + struct al_pcie_port *pcie_port, + al_bool en) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + al_reg_write32_masked( + ®s->app.global_ctrl.pm_control, + PCIE_W_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN, + (en == AL_TRUE) ? + PCIE_W_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN : 0); +} + +/* start pcie link */ +int al_pcie_link_start(struct al_pcie_port *pcie_port) +{ + struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs; + + al_dbg("PCIe_%d: start port link.\n", pcie_port->port_id); + + al_reg_write32_masked( + ®s->app.global_ctrl.port_init, + PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN, + PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN); + + return 0; +} + +/* stop pcie link */ +int al_pcie_link_stop(struct al_pcie_port *pcie_port) +{ + struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs; + + al_dbg("PCIe_%d: stop port link.\n", pcie_port->port_id); + + al_reg_write32_masked( + ®s->app.global_ctrl.port_init, + PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN, + ~PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN); + + return 0; +} + +/* wait for link up indication */ +int al_pcie_link_up_wait(struct al_pcie_port *pcie_port, uint32_t timeout_ms) +{ + int wait_count = timeout_ms * AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC; + + while (wait_count-- > 0) { + if (al_pcie_check_link(pcie_port, NULL)) { + al_info("PCIe_%d: <<<<<<<<< Link up >>>>>>>>>\n", pcie_port->port_id); + return 0; + } else + al_dbg("PCIe_%d: No link up, %d attempts remaining\n", + pcie_port->port_id, wait_count); + + al_udelay(AL_PCIE_LINKUP_WAIT_INTERVAL); + } + al_info("PCIE_%d: link is not established in time\n", + pcie_port->port_id); + + return -ETIME; +} + +/** get link status */ +int al_pcie_link_status(struct al_pcie_port *pcie_port, + struct al_pcie_link_status *status) +{ + struct al_pcie_regs *regs = pcie_port->regs; + uint16_t pcie_lnksta; + + al_assert(status); + + status->link_up = al_pcie_check_link(pcie_port, &status->ltssm_state); + + if (!status->link_up) { + status->speed = AL_PCIE_LINK_SPEED_DEFAULT; + status->lanes = 0; + return 0; + } + + pcie_lnksta = al_reg_read16((uint16_t __iomem *)®s->core_space.pcie_cap_base + (AL_PCI_EXP_LNKSTA >> 1)); + + switch(pcie_lnksta & AL_PCI_EXP_LNKSTA_CLS) { + case AL_PCI_EXP_LNKSTA_CLS_2_5GB: + status->speed = AL_PCIE_LINK_SPEED_GEN1; + break; + case AL_PCI_EXP_LNKSTA_CLS_5_0GB: + status->speed = AL_PCIE_LINK_SPEED_GEN2; + break; + case AL_PCI_EXP_LNKSTA_CLS_8_0GB: + status->speed = AL_PCIE_LINK_SPEED_GEN3; + break; + default: + status->speed = AL_PCIE_LINK_SPEED_DEFAULT; + al_err("PCIe %d: unknown link speed indication. PCIE LINK STATUS %x\n", + pcie_port->port_id, pcie_lnksta); + } + status->lanes = (pcie_lnksta & AL_PCI_EXP_LNKSTA_NLW) >> AL_PCI_EXP_LNKSTA_NLW_SHIFT; + al_info("PCIe %d: Link up. 
speed gen%d negotiated width %d\n", + pcie_port->port_id, status->speed, status->lanes); + + return 0; +} + +/** trigger hot reset */ +int al_pcie_link_hot_reset(struct al_pcie_port *pcie_port) +{ + al_err("PCIe %d: link hot reset not implemented\n", + pcie_port->port_id); + + return -ENOSYS; +} + +/* TODO: check if this function needed */ +int al_pcie_link_change_speed(struct al_pcie_port *pcie_port, + enum al_pcie_link_speed new_speed __attribute__((__unused__))) +{ + al_err("PCIe %d: link change speed not implemented\n", + pcie_port->port_id); + + return -ENOSYS; +} + +/* TODO: check if this function needed */ +int al_pcie_link_change_width(struct al_pcie_port *pcie_port, + uint8_t width __attribute__((__unused__))) +{ + al_err("PCIe %d: link change width not implemented\n", + pcie_port->port_id); + + return -ENOSYS; +} + +/** set target_bus and mask_target_bus */ +int al_pcie_target_bus_set(struct al_pcie_port *pcie_port, + uint8_t target_bus, + uint8_t mask_target_bus) +{ + struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs; + uint32_t reg; + + reg = al_reg_read32(®s->axi.ob_ctrl.cfg_target_bus); + AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK, + PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT, + mask_target_bus); + AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK, + PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT, + target_bus); + al_reg_write32(®s->axi.ob_ctrl.cfg_target_bus, reg); + return 0; +} + +/** get target_bus and mask_target_bus */ +int al_pcie_target_bus_get(struct al_pcie_port *pcie_port, + uint8_t *target_bus, + uint8_t *mask_target_bus) +{ + struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs; + uint32_t reg; + + al_assert(target_bus); + al_assert(mask_target_bus); + + reg = al_reg_read32(®s->axi.ob_ctrl.cfg_target_bus); + + *mask_target_bus = AL_REG_FIELD_GET(reg, + PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK, + PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT); + *target_bus = AL_REG_FIELD_GET(reg, + PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK, + PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT); + return 0; +} + +/** Set secondary bus number */ +int al_pcie_secondary_bus_set(struct al_pcie_port *pcie_port, uint8_t secbus) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + uint32_t secbus_val = (secbus << + PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_SHIFT); + + al_reg_write32_masked( + ®s->axi.ob_ctrl.cfg_control, + PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_MASK, + secbus_val); + return 0; +} + +/** Set sub-ordinary bus number */ +int al_pcie_subordinary_bus_set(struct al_pcie_port *pcie_port, uint8_t subbus) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + uint32_t subbus_val = (subbus << + PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_SHIFT); + + al_reg_write32_masked( + ®s->axi.ob_ctrl.cfg_control, + PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_MASK, + subbus_val); + return 0; +} + +/** get base address of pci configuration space header */ +int al_pcie_config_space_get(struct al_pcie_port *pcie_port, + uint8_t __iomem **addr) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + *addr = (uint8_t __iomem *)®s->core_space.config_header[0]; + return 0; +} + +/* Read data from the local configuration space */ +uint32_t al_pcie_cfg_emul_local_cfg_space_read( + struct al_pcie_port *pcie_port, + unsigned int reg_offset) +{ + struct al_pcie_regs *regs = pcie_port->regs; + uint32_t data; + + data = al_reg_read32(®s->core_space.config_header[reg_offset]); + + return data; +} + +/* Write data 
to the local configuration space */ +void al_pcie_cfg_emul_local_cfg_space_write( + struct al_pcie_port *pcie_port, + unsigned int reg_offset, + uint32_t data, + al_bool ro) +{ + struct al_pcie_regs *regs = pcie_port->regs; + uint32_t *offset = (ro == AL_FALSE) ? + (®s->core_space.config_header[reg_offset]) : + (®s->core_space.config_header[reg_offset] + (0x1000 >> 2)); + + al_reg_write32(offset, data); +} + +void al_pcie_axi_io_config(struct al_pcie_port *pcie_port, + al_phys_addr_t start, + al_phys_addr_t end) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + al_reg_write32(®s->axi.ob_ctrl.io_start_h, + (uint32_t)(((uint64_t)start >> 32) & 0xFFFFFFFF)); + + al_reg_write32(®s->axi.ob_ctrl.io_start_l, + (uint32_t)(start & 0xFFFFFFFF)); + + al_reg_write32(®s->axi.ob_ctrl.io_limit_h, + (uint32_t)(((uint64_t)end >> 32) & 0xFFFFFFFF)); + + al_reg_write32(®s->axi.ob_ctrl.io_limit_l, + (uint32_t)(end & 0xFFFFFFFF)); + + al_reg_write32_masked(®s->axi.ctrl.slv_ctl, + PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN, + PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN); +} + +/** program internal ATU region entry */ +int al_pcie_atu_region_set(struct al_pcie_port *pcie_port, struct al_pcie_atu_region *atu_region) +{ + struct al_pcie_regs *regs = pcie_port->regs; + uint32_t reg = 0; + uint32_t limit_reg_val; + + /*TODO : add sanity check */ + AL_REG_FIELD_SET(reg, 0xF, 0, atu_region->index); + AL_REG_BIT_VAL_SET(reg, 31, atu_region->direction); + al_reg_write32(®s->core_space.port_regs.iatu.index, reg); + + al_reg_write32(®s->core_space.port_regs.iatu.lower_base_addr, + (uint32_t)(atu_region->base_addr & 0xFFFFFFFF)); + al_reg_write32(®s->core_space.port_regs.iatu.upper_base_addr, + (uint32_t)((atu_region->base_addr >> 32)& 0xFFFFFFFF)); + al_reg_write32(®s->core_space.port_regs.iatu.lower_target_addr, + (uint32_t)(atu_region->target_addr & 0xFFFFFFFF)); + al_reg_write32(®s->core_space.port_regs.iatu.upper_target_addr, + (uint32_t)((atu_region->target_addr >> 32)& 0xFFFFFFFF)); + + if (pcie_port->rev_id > AL_PCIE_REV_ID_0) { + uint32_t *limit_ext_reg = + (atu_region->direction == al_pcie_atu_dir_outbound) ? + ®s->app.atu.out_mask_pair[atu_region->index / 2] : + ®s->app.atu.in_mask_pair[atu_region->index / 2]; + uint32_t limit_ext_reg_mask = + (atu_region->index % 2) ? + PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_MASK : + PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_MASK; + unsigned int limit_ext_reg_shift = + (atu_region->index % 2) ? + PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_SHIFT : + PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_SHIFT; + uint64_t limit_sz = + atu_region->limit - atu_region->base_addr; + uint64_t limit_sz_msk = limit_sz - 1; + uint32_t limit_ext_reg_val = (uint32_t)(((limit_sz_msk) >> + 32) & 0xFFFFFFFF); + + if (limit_ext_reg_val) { + limit_reg_val = (uint32_t)((limit_sz_msk) & 0xFFFFFFFF); + al_assert(limit_reg_val == 0xFFFFFFFF); + } else { + limit_reg_val = (uint32_t)(atu_region->limit & + 0xFFFFFFFF); + } + + al_reg_write32_masked( + limit_ext_reg, + limit_ext_reg_mask, + limit_ext_reg_val << limit_ext_reg_shift); + } else { + limit_reg_val = (uint32_t)(atu_region->limit & 0xFFFFFFFF); + } + + al_reg_write32(®s->core_space.port_regs.iatu.limit_addr, + limit_reg_val); + + reg = 0; + AL_REG_FIELD_SET(reg, 0x1F, 0, atu_region->tlp_type); + AL_REG_FIELD_SET(reg, 0x3 << 9, 9, atu_region->attr); + al_reg_write32(®s->core_space.port_regs.iatu.cr1, reg); + + /* Enable/disable the region. 
*/ + reg = 0; + AL_REG_FIELD_SET(reg, 0xFF, 0, atu_region->msg_code); + AL_REG_FIELD_SET(reg, 0x700, 8, atu_region->bar_number); + AL_REG_BIT_VAL_SET(reg, 16, atu_region->enable_attr_match_mode == AL_TRUE); + AL_REG_BIT_VAL_SET(reg, 21, atu_region->enable_msg_match_mode == AL_TRUE); + AL_REG_BIT_VAL_SET(reg, 28, atu_region->cfg_shift_mode == AL_TRUE); + AL_REG_BIT_VAL_SET(reg, 29, atu_region->invert_matching == AL_TRUE); + if (atu_region->tlp_type == AL_PCIE_TLP_TYPE_MEM || atu_region->tlp_type == AL_PCIE_TLP_TYPE_IO) + AL_REG_BIT_VAL_SET(reg, 30, !!atu_region->match_mode); + AL_REG_BIT_VAL_SET(reg, 31, !!atu_region->enable); + + al_reg_write32(®s->core_space.port_regs.iatu.cr2, reg); + + return 0; +} + +/** generate INTx Assert/DeAssert Message */ +int al_pcie_legacy_int_gen(struct al_pcie_port *pcie_port, al_bool assert, + enum al_pcie_legacy_int_type type) +{ + struct al_pcie_regs *regs = pcie_port->regs; + uint32_t reg; + + al_assert(type == AL_PCIE_LEGACY_INTA); /* only INTA supported */ + reg = al_reg_read32(®s->app.global_ctrl.events_gen); + AL_REG_BIT_VAL_SET(reg, 3, !!assert); + al_reg_write32(®s->app.global_ctrl.events_gen, reg); + + return 0; +} + +/** generate MSI interrupt */ +int al_pcie_msi_int_gen(struct al_pcie_port *pcie_port, uint8_t vector) +{ + struct al_pcie_regs *regs = pcie_port->regs; + uint32_t reg; + + /* set msi vector and clear MSI request */ + reg = al_reg_read32(®s->app.global_ctrl.events_gen); + AL_REG_BIT_CLEAR(reg, 4); + AL_REG_FIELD_SET(reg, + PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_MASK, + PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_SHIFT, + vector); + al_reg_write32(®s->app.global_ctrl.events_gen, reg); + /* set MSI request */ + AL_REG_BIT_SET(reg, 4); + al_reg_write32(®s->app.global_ctrl.events_gen, reg); + + return 0; +} + +/** configure MSIX capability */ +int al_pcie_msix_config( + struct al_pcie_port *pcie_port, + struct al_pcie_msix_params *msix_params) +{ + struct al_pcie_regs *regs = pcie_port->regs; + uint32_t msix_reg0 = al_reg_read32(®s->core_space.msix_cap_base); + + msix_reg0 &= ~(AL_PCI_MSIX_MSGCTRL_TBL_SIZE << AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT); + msix_reg0 |= ((msix_params->table_size - 1) & AL_PCI_MSIX_MSGCTRL_TBL_SIZE) << + AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT; + al_reg_write32(®s->core_space.msix_cap_base, msix_reg0); + + /* Table offset & BAR */ + al_reg_write32(®s->core_space.msix_cap_base + (AL_PCI_MSIX_TABLE >> 2), + (msix_params->table_offset & AL_PCI_MSIX_TABLE_OFFSET) | + (msix_params->table_bar & AL_PCI_MSIX_TABLE_BAR)); + /* PBA offset & BAR */ + al_reg_write32(®s->core_space.msix_cap_base + (AL_PCI_MSIX_PBA >> 2), + (msix_params->pba_offset & AL_PCI_MSIX_PBA_OFFSET) | + (msix_params->pba_bar & AL_PCI_MSIX_PBA_BAR)); + + return 0; +} + +/** check whether MSIX is enabled */ +al_bool al_pcie_msix_enabled(struct al_pcie_port *pcie_port) +{ + struct al_pcie_regs *regs = pcie_port->regs; + uint32_t msix_reg0 = al_reg_read32(®s->core_space.msix_cap_base); + + if (msix_reg0 & AL_PCI_MSIX_MSGCTRL_EN) + return AL_TRUE; + return AL_FALSE; +} + +/** check whether MSIX is masked */ +al_bool al_pcie_msix_masked(struct al_pcie_port *pcie_port) +{ + struct al_pcie_regs *regs = pcie_port->regs; + uint32_t msix_reg0 = al_reg_read32(®s->core_space.msix_cap_base); + + if (msix_reg0 & AL_PCI_MSIX_MSGCTRL_MASK) + return AL_TRUE; + return AL_FALSE; +} + +/********************** Loopback mode (RC and Endpoint modes) ************/ + +/** enter local pipe loopback mode */ +int al_pcie_local_pipe_loopback_enter(struct al_pcie_port *pcie_port) +{ + struct 
al_pcie_regs *regs = pcie_port->regs; + + al_dbg("PCIe %d: Enter LOCAL PIPE Loopback mode", pcie_port->port_id); + + al_reg_write32_masked(®s->core_space.port_regs.pipe_loopback_ctrl, + 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT, + 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT); + + al_reg_write32_masked(®s->core_space.port_regs.port_link_ctrl, + 1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT, + 1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT); + + return 0; +} + +/** + * @brief exit local pipe loopback mode + * + * @param pcie_port pcie port handle + * @return 0 if no error found + */ +int al_pcie_local_pipe_loopback_exit(struct al_pcie_port *pcie_port) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + al_dbg("PCIe %d: Exit LOCAL PIPE Loopback mode", pcie_port->port_id); + + al_reg_write32_masked(®s->core_space.port_regs.pipe_loopback_ctrl, + 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT, + 0); + + al_reg_write32_masked(®s->core_space.port_regs.port_link_ctrl, + 1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT, + 0); + return 0; +} + +/** enter remote loopback mode */ +int al_pcie_remote_loopback_enter(struct al_pcie_port *pcie_port) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + al_dbg("PCIe %d: Enter REMOTE Loopback mode", pcie_port->port_id); + + al_reg_write32_masked(®s->core_space.port_regs.port_link_ctrl, + 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT, + 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT); + + return 0; +} + + +/** + * @brief exit remote loopback mode + * + * @param pcie_port pcie port handle + * @return 0 if no error found + */ +int al_pcie_remote_loopback_exit(struct al_pcie_port *pcie_port) +{ + struct al_pcie_regs *regs = pcie_port->regs; + + al_dbg("PCIe %d: Exit REMOTE Loopback mode", pcie_port->port_id); + + al_reg_write32_masked(®s->core_space.port_regs.port_link_ctrl, + 1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT, + 0); + return 0; +} diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_axi_reg.h b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_axi_reg.h new file mode 100644 index 00000000000000..240bc19e96a2be --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_axi_reg.h @@ -0,0 +1,538 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + + +#ifndef __AL_PCIE_HAL_AXI_REG_H__ +#define __AL_PCIE_HAL_AXI_REG_H__ + +#ifdef __cplusplus +extern "C" { +#endif +/* +* Unit Registers +*/ + + + +struct al_pcie_axi_ctrl { + /* [0x0] */ + uint32_t global; + uint32_t rsrvd_0; + /* [0x8] */ + uint32_t master_bctl; + /* [0xc] */ + uint32_t master_rctl; + /* [0x10] */ + uint32_t master_ctl; + /* [0x14] */ + uint32_t master_arctl; + /* [0x18] */ + uint32_t master_awctl; + /* [0x1c] */ + uint32_t slave_rctl; + /* [0x20] */ + uint32_t slv_wctl; + /* [0x24] */ + uint32_t slv_ctl; + /* [0x28] */ + uint32_t dbi_ctl; + /* [0x2c] */ + uint32_t vmid_mask; + uint32_t rsrvd[4]; +}; +struct al_pcie_axi_ob_ctrl { + /* [0x0] */ + uint32_t cfg_target_bus; + /* [0x4] */ + uint32_t cfg_control; + /* [0x8] */ + uint32_t io_start_l; + /* [0xc] */ + uint32_t io_start_h; + /* [0x10] */ + uint32_t io_limit_l; + /* [0x14] */ + uint32_t io_limit_h; + /* [0x18] */ + uint32_t msg_start_l; + /* [0x1c] */ + uint32_t msg_start_h; + /* [0x20] */ + uint32_t msg_limit_l; + /* [0x24] */ + uint32_t msg_limit_h; + uint32_t rsrvd[6]; +}; +struct al_pcie_axi_msg { + /* [0x0] */ + uint32_t addr_high; + /* [0x4] */ + uint32_t addr_low; + /* [0x8] */ + uint32_t type; +}; +struct al_pcie_axi_pcie_status { + /* [0x0] */ + uint32_t debug; +}; +struct al_pcie_axi_rd_parity { + /* [0x0] */ + uint32_t log_high; + /* [0x4] */ + uint32_t log_low; +}; +struct al_pcie_axi_rd_cmpl { + /* [0x0] */ + uint32_t cmpl_log_high; + /* [0x4] */ + uint32_t cmpl_log_low; +}; +struct al_pcie_axi_rd_to { + /* [0x0] */ + uint32_t to_log_high; + /* [0x4] */ + uint32_t to_log_low; +}; +struct al_pcie_axi_wr_cmpl { + /* [0x0] */ + uint32_t wr_cmpl_log_high; + /* [0x4] */ + uint32_t wr_cmpl_log_low; +}; +struct al_pcie_axi_wr_to { + /* [0x0] */ + uint32_t wr_to_log_high; + /* [0x4] */ + uint32_t wr_to_log_low; +}; +struct al_pcie_axi_pcie_global { + /* [0x0] */ + uint32_t conf; +}; +struct al_pcie_axi_status { + /* [0x0] */ + uint32_t lane0; + /* [0x4] */ + uint32_t lane1; + /* [0x8] */ + uint32_t lane2; + /* [0xc] */ + uint32_t lane3; +}; +struct al_pcie_axi_conf { + /* [0x0] */ + uint32_t zero_lane0; + /* [0x4] */ + uint32_t zero_lane1; + /* [0x8] */ + uint32_t zero_lane2; + /* [0xc] */ + uint32_t zero_lane3; + /* [0x10] */ + uint32_t one_lane0; + /* [0x14] */ + uint32_t one_lane1; + /* [0x18] */ + uint32_t one_lane2; + /* [0x1c] */ + uint32_t one_lane3; +}; +struct al_pcie_axi_parity { + /* [0x0] */ + uint32_t en_axi; + /* [0x4] */ + uint32_t status_axi; +}; +struct al_pcie_axi_pos_logged { + /* [0x0] */ + uint32_t error_low; + /* [0x4] */ + uint32_t error_high; +}; +struct al_pcie_axi_ordering { + /* [0x0] */ + uint32_t pos_cntl; +}; +struct al_pcie_axi_link_down { + /* [0x0] */ + uint32_t reset_extend; +}; +struct al_pcie_axi_pre_configuration { + /* [0x0] */ + uint32_t pcie_core_setup; +}; +struct al_pcie_axi_init_fc { + /* [0x0] The sum of all the fields below must be 97 */ + uint32_t cfg; +}; +struct 
al_pcie_axi_int_grp_a_axi {
+	/* [0x0] Interrupt Cause Register. Set by hardware */
+	uint32_t cause;
+	uint32_t rsrvd_0;
+	/* [0x8] Interrupt Cause Set Register. Writing 1 to a bit in t ... */
+	uint32_t cause_set;
+	uint32_t rsrvd_1;
+	/* [0x10] Interrupt Mask Register. If Auto-mask control bit =TR ... */
+	uint32_t mask;
+	uint32_t rsrvd_2;
+	/* [0x18] Interrupt Mask Clear Register. Used when auto-mask co ... */
+	uint32_t mask_clear;
+	uint32_t rsrvd_3;
+	/* [0x20] Interrupt Status Register. This register latches the ... */
+	uint32_t status;
+	uint32_t rsrvd_4;
+	/* [0x28] Interrupt Control Register */
+	uint32_t control;
+	uint32_t rsrvd_5;
+	/* [0x30] Interrupt Mask Register. Each bit in this register ma ... */
+	uint32_t abort_mask;
+	uint32_t rsrvd_6;
+	/* [0x38] Interrupt Log Register. Each bit in this register mas ... */
+	uint32_t log_mask;
+	uint32_t rsrvd;
+};
+
+struct al_pcie_axi_regs {
+	struct al_pcie_axi_ctrl ctrl; /* [0x0] */
+	struct al_pcie_axi_ob_ctrl ob_ctrl; /* [0x40] */
+	uint32_t rsrvd_0[4];
+	struct al_pcie_axi_msg msg; /* [0x90] */
+	struct al_pcie_axi_pcie_status pcie_status; /* [0x9c] */
+	struct al_pcie_axi_rd_parity rd_parity; /* [0xa0] */
+	struct al_pcie_axi_rd_cmpl rd_cmpl; /* [0xa8] */
+	struct al_pcie_axi_rd_to rd_to; /* [0xb0] */
+	struct al_pcie_axi_wr_cmpl wr_cmpl; /* [0xb8] */
+	struct al_pcie_axi_wr_to wr_to; /* [0xc0] */
+	struct al_pcie_axi_pcie_global pcie_global; /* [0xc8] */
+	struct al_pcie_axi_status status; /* [0xcc] */
+	struct al_pcie_axi_conf conf; /* [0xdc] */
+	struct al_pcie_axi_parity parity; /* [0xfc] */
+	struct al_pcie_axi_pos_logged pos_logged; /* [0x104] */
+	struct al_pcie_axi_ordering ordering; /* [0x10c] */
+	struct al_pcie_axi_link_down link_down; /* [0x110] */
+	struct al_pcie_axi_pre_configuration pre_configuration; /* [0x114] */
+	struct al_pcie_axi_init_fc init_fc; /* [0x118] */
+	uint32_t rsrvd_1[57];
+	struct al_pcie_axi_int_grp_a_axi int_grp_a; /* [0x200] */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** Global register ****/
+/* Not in use */
+#define PCIE_AXI_CTRL_GLOBAL_CPL_AFTER_P_ORDER_DIS (1 << 0)
+/* Not in use */
+#define PCIE_AXI_CTRL_GLOBAL_CPU_CPL_ONLY_EN (1 << 1)
+/* When linked down, map all transactions to PCIe to DEC ERR. */
+#define PCIE_AXI_CTRL_GLOBAL_BLOCK_PCIE_SLAVE_EN (1 << 2)
+/* Wait for the NIC to flush before enabling reset to the PCIe c ... */
+#define PCIE_AXI_CTRL_GLOBAL_WAIT_SLV_FLUSH_EN (1 << 3)
+/* When the BME is cleared and this bit is set, it causes all tr ... */
+#define PCIE_AXI_CTRL_GLOBAL_MEM_BAR_MAP_TO_ERR (1 << 4)
+/* Wait for the DBI port (the port that enables access to the in ... */
+#define PCIE_AXI_CTRL_GLOBAL_WAIT_DBI_FLUSH_EN (1 << 5)
+/* When set, adds parity on the write and read address channels, ... */
+#define PCIE_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR (1 << 16)
+/* When set, enables parity check on the read data. */
+#define PCIE_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD (1 << 17)
+/* When set, adds parity on the RD data channel. */
+#define PCIE_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV (1 << 18)
+/* When set, enables parity check on the write data.
*/
+#define PCIE_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR (1 << 19)
+/* When set, error track for timeout and parity is disabled, i */
+#define PCIE_AXI_CTRL_GLOBAL_ERROR_TRACK_DIS (1 << 20)
+
+/**** Master_Arctl register ****/
+/* override arcache */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_OVR_ARCACHE (1 << 0)
+/* arcache value */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_ARACHE_VA_MASK 0x0000001E
+#define PCIE_AXI_CTRL_MASTER_ARCTL_ARACHE_VA_SHIFT 1
+/* arprot override */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_ARPROT_OVR (1 << 5)
+/* arprot value */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_ARPROT_VALUE_MASK 0x000001C0
+#define PCIE_AXI_CTRL_MASTER_ARCTL_ARPROT_VALUE_SHIFT 6
+/* vmid val */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_VMID_VAL_MASK 0x01FFFE00
+#define PCIE_AXI_CTRL_MASTER_ARCTL_VMID_VAL_SHIFT 9
+/* IPA value */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_IPA_VAL (1 << 25)
+/* override snoop indication, if not set take it from mstr_armisc ... */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP (1 << 26)
+/*
+snoop indication value when override */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP (1 << 27)
+/*
+arqos value */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_MASK 0xF0000000
+#define PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_SHIFT 28
+
+/**** Master_Awctl register ****/
+/* override arcache */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_OVR_ARCACHE (1 << 0)
+/* awcache value */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_AWACHE_VA_MASK 0x0000001E
+#define PCIE_AXI_CTRL_MASTER_AWCTL_AWACHE_VA_SHIFT 1
+/* awprot override */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_AWPROT_OVR (1 << 5)
+/* awprot value */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_AWPROT_VALUE_MASK 0x000001C0
+#define PCIE_AXI_CTRL_MASTER_AWCTL_AWPROT_VALUE_SHIFT 6
+/* vmid val */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_VMID_VAL_MASK 0x01FFFE00
+#define PCIE_AXI_CTRL_MASTER_AWCTL_VMID_VAL_SHIFT 9
+/* IPA value */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_IPA_VAL (1 << 25)
+/* override snoop indication, if not set take it from mstr_armisc ... */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP (1 << 26)
+/*
+snoop indication value when override */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP (1 << 27)
+/*
+awqos value */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_MASK 0xF0000000
+#define PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_SHIFT 28
+
+/**** slv_ctl register ****/
+#define PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN (1 << 6)
+
+/**** Cfg_Target_Bus register ****/
+/* Defines which MSBs to complete the number of the bus that ar ... */
+#define PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK 0x000000FF
+#define PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT 0
+/* Target bus number for outbound configuration type0 and type1 ... */
+#define PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK 0x0000FF00
+#define PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT 8
+
+/**** Cfg_Control register ****/
+/* Primary bus number */
+#define PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_PBUS_MASK 0x000000FF
+#define PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_PBUS_SHIFT 0
+/*
+Subordinate bus number */
+#define PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_MASK 0x0000FF00
+#define PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_SHIFT 8
+/* Secondary bus number */
+#define PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_MASK 0x00FF0000
+#define PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_SHIFT 16
+/* Enable outbound configuration access through iATU.
*/
+#define PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_IATU_EN (1 << 31)
+
+/**** IO_Start_H register ****/
+/*
+Outbound ATU I/O start address high */
+#define PCIE_AXI_MISC_OB_CTRL_IO_START_H_ADDR_MASK 0x000003FF
+#define PCIE_AXI_MISC_OB_CTRL_IO_START_H_ADDR_SHIFT 0
+
+/**** IO_Limit_H register ****/
+/*
+Outbound ATU I/O limit address high */
+#define PCIE_AXI_MISC_OB_CTRL_IO_LIMIT_H_ADDR_MASK 0x000003FF
+#define PCIE_AXI_MISC_OB_CTRL_IO_LIMIT_H_ADDR_SHIFT 0
+
+/**** Msg_Start_H register ****/
+/*
+Outbound ATU msg-no-data start address high */
+#define PCIE_AXI_MISC_OB_CTRL_MSG_START_H_ADDR_MASK 0x000003FF
+#define PCIE_AXI_MISC_OB_CTRL_MSG_START_H_ADDR_SHIFT 0
+
+/**** Msg_Limit_H register ****/
+/*
+Outbound ATU msg-no-data limit address high */
+#define PCIE_AXI_MISC_OB_CTRL_MSG_LIMIT_H_ADDR_MASK 0x000003FF
+#define PCIE_AXI_MISC_OB_CTRL_MSG_LIMIT_H_ADDR_SHIFT 0
+
+/**** type register ****/
+/* Type of message */
+#define PCIE_AXI_MISC_MSG_TYPE_TYPE_MASK 0x00FFFFFF
+#define PCIE_AXI_MISC_MSG_TYPE_TYPE_SHIFT 0
+/* Reserved */
+#define PCIE_AXI_MISC_MSG_TYPE_RSRVD_MASK 0xFF000000
+#define PCIE_AXI_MISC_MSG_TYPE_RSRVD_SHIFT 24
+
+/**** debug register ****/
+/* Causes AXI PCIe reset, including master/slave/DBI (registers ... */
+#define PCIE_AXI_MISC_PCIE_STATUS_DEBUG_AXI_BRIDGE_RESET (1 << 0)
+/* Causes reset of the entire PCIe core (including the AXI bridg ... */
+#define PCIE_AXI_MISC_PCIE_STATUS_DEBUG_CORE_RESET (1 << 1)
+/* Indicates that the SB is empty from the request to the PCIe ( ... */
+#define PCIE_AXI_MISC_PCIE_STATUS_DEBUG_SB_FLUSH_OB_STATUS (1 << 2)
+/* Map any transaction to the PCIe core to ERROR. */
+#define PCIE_AXI_MISC_PCIE_STATUS_DEBUG_SB_MAP_TO_ERR (1 << 3)
+/* Indicates that the pcie_core clock is gated off */
+#define PCIE_AXI_MISC_PCIE_STATUS_DEBUG_CORE_CLK_GATE_OFF (1 << 4)
+/* Reserved */
+#define PCIE_AXI_MISC_PCIE_STATUS_DEBUG_RSRVD_MASK 0xFFFFFFE0
+#define PCIE_AXI_MISC_PCIE_STATUS_DEBUG_RSRVD_SHIFT 5
+
+/**** conf register ****/
+/* Device Type. Indicates the specific type of this PCI Express Fu ... */
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK 0x0000000F
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT 0
+/* [4] – Lane 0 active, [5] – Lane 1 active, [6] – Lane 2 active, [7] ... */
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK 0x000000F0
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT 4
+/* [8] SD (shutdown) to the memories */
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN 0x100
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_RESERVED_MASK 0xFFFFFE00
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_RESERVED_SHIFT 9
+
+/**** zero_laneX register ****/
+/* phy_mac_local_fs */
+#define PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_MASK 0x0000003f
+#define PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_SHIFT 0
+/* phy_mac_local_lf */
+#define PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_MASK 0x00000fc0
+#define PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_SHIFT 6
+
+/**** pos_cntl register ****/
+/* Disables POS. */
+#define PCIE_AXI_POS_ORDER_AXI_POS_BYPASS (1 << 0)
+/* Clear the POS data structure. */
+#define PCIE_AXI_POS_ORDER_AXI_POS_CLEAR (1 << 1)
+/* Read push all write. */
+#define PCIE_AXI_POS_ORDER_AXI_POS_RSO_ENABLE (1 << 2)
+/* Causes the PCIe core to wait for all the BRESPs before issuin ... */
+#define PCIE_AXI_POS_ORDER_AXI_DW_RD_FLUSH_WR (1 << 3)
+/* When set to 1'b1, supports interleaving data return from the ...
*/
+#define PCIE_AXI_POS_ORDER_RD_CMPL_AFTER_WR_SUPPORT_RD_INTERLV (1 << 4)
+/* When set to 1'b1, disables read completion after write orderi ... */
+#define PCIE_AXI_POS_ORDER_BYPASS_CMPL_AFTER_WR_FIX (1 << 5)
+/* When set, disables EP mode read cmpl on the master port push ... */
+#define PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_DIS (1 << 6)
+/* When set, disables EP mode read cmpl on the master port push ... */
+#define PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_SUPPORT_INTERLV_DIS (1 << 7)
+/* When set, disables the ATS CAP. */
+#define PCIE_AXI_CORE_SETUP_ATS_CAP_DIS AL_BIT(13)
+
+/**** pcie_core_setup register ****/
+/* This value delays the rate change to the serdes, until the EIO ... */
+#define PCIE_AXI_CORE_SETUP_DELAY_MAC_PHY_RATE_MASK 0x000000FF
+#define PCIE_AXI_CORE_SETUP_DELAY_MAC_PHY_RATE_SHIFT 0
+/* Limit the number of outstanding AXI reads that the PCIe core ... */
+#define PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_MASK 0x0000FF00
+#define PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_SHIFT 8
+/* Enable the sriov feature */
+#define PCIE_AXI_CORE_SETUP_SRIOV_ENABLE AL_BIT(16)
+
+/**** cfg register ****/
+/* This value sets the possible outstanding header writes (post ... */
+#define PCIE_AXI_INIT_FC_CFG_NOF_P_HDR_MASK 0x0000007F
+#define PCIE_AXI_INIT_FC_CFG_NOF_P_HDR_SHIFT 0
+/* This value sets the possible outstanding header reads (non-p ... */
+#define PCIE_AXI_INIT_FC_CFG_NOF_NP_HDR_MASK 0x00003F80
+#define PCIE_AXI_INIT_FC_CFG_NOF_NP_HDR_SHIFT 7
+/* This value sets the possible outstanding header CMPLs, the ... */
+#define PCIE_AXI_INIT_FC_CFG_NOF_CPL_HDR_MASK 0x001FC000
+#define PCIE_AXI_INIT_FC_CFG_NOF_CPL_HDR_SHIFT 14
+
+#define PCIE_AXI_INIT_FC_CFG_RSRVD_MASK 0xFFE00000
+#define PCIE_AXI_INIT_FC_CFG_RSRVD_SHIFT 21
+
+/**** int_cause_grp_A_axi register ****/
+/* Master Response Composer Lookup Error. Overflow that occurred i ... */
+#define PCIE_AXI_INT_GRP_A_CAUSE_GM_COMPOSER_LOOKUP_ERR (1 << 0)
+/* Indicates a PARITY ERROR on the master data read channel */
+#define PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_DATA_PATH_RD (1 << 2)
+/* Indicates a PARITY ERROR on the slave addr read channel */
+#define PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_RD (1 << 3)
+/* Indicates a PARITY ERROR on the slave addr write channel */
+#define PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_WR (1 << 4)
+/* Indicates a PARITY ERROR on the slave data write channel */
+#define PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_DATA_WR (1 << 5)
+/* Reserved */
+#define PCIE_AXI_INT_GRP_A_CAUSE_RESERVED_6 (1 << 6)
+/* Software error: ECAM read request with invalid bus number */
+#define PCIE_AXI_INT_GRP_A_CAUSE_SW_ECAM_ERR_RD (1 << 7)
+/* Software error: ECAM write request with invalid bus number */
+#define PCIE_AXI_INT_GRP_A_CAUSE_SW_ECAM_ERR_WR (1 << 8)
+/* Indicates an ERROR in the PCIe application cause register. */
+#define PCIE_AXI_INT_GRP_A_CAUSE_PCIE_CORE_INT (1 << 9)
+/* Whenever the Master AXI finishes writing a message, it sets t ... */
+#define PCIE_AXI_INT_GRP_A_CAUSE_MSTR_AXI_GETOUT_MSG (1 << 10)
+/* Read AXI completion has ERROR. */
+#define PCIE_AXI_INT_GRP_A_CAUSE_RD_CMPL_ERR (1 << 11)
+/* Write AXI completion has ERROR. */
+#define PCIE_AXI_INT_GRP_A_CAUSE_WR_CMPL_ERR (1 << 12)
+/* Read AXI completion has timed out. */
+#define PCIE_AXI_INT_GRP_A_CAUSE_RD_CMPL_TO (1 << 13)
+/* Write AXI completion has timed out.
*/ +#define PCIE_AXI_INT_GRP_A_CAUSE_WR_CMPL_TO (1 << 14) +/* Parity error AXI domain */ +#define PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERROR_AXI (1 << 15) +/* POS error interrupt */ +#define PCIE_AXI_INT_GRP_A_CAUSE_POS_AXI_BRESP (1 << 16) +/* The outstanding write counter become full should never happe ... */ +#define PCIE_AXI_INT_GRP_A_CAUSE_WRITE_CNT_FULL_ERR (1 << 17) +/* BRESP received before the write counter increment. */ +#define PCIE_AXI_INT_GRP_A_CAUSE_BRESP_BEFORE_WR_CNT_INC_ERR (1 << 18) + +/**** int_control_grp_A_axi register ****/ +/* When Clear_on_Read =1, all bits of the Cause register are cle ... */ +#define PCIE_AXI_INT_GRP_A_CTRL_CLEAR_ON_READ (1 << 0) +/* (Must be set only when MSIX is enabled */ +#define PCIE_AXI_INT_GRP_A_CTRL_AUTO_MASK (1 << 1) +/* Auto_Clear (RW)When Auto-Clear =1, the bits in the Interrupt ... */ +#define PCIE_AXI_INT_GRP_A_CTRL_AUTO_CLEAR (1 << 2) +/* When set,_on_Posedge =1, the bits in the Interrupt Cause regi ... */ +#define PCIE_AXI_INT_GRP_A_CTRL_SET_ON_POS (1 << 3) +/* When Moderation_Reset =1, all Moderation timers associated wi ... */ +#define PCIE_AXI_INT_GRP_A_CTRL_MOD_RST (1 << 4) +/* When mask_msi_x =1, no MSI-X from this group is sent */ +#define PCIE_AXI_INT_GRP_A_CTRL_MASK_MSI_X (1 << 5) +/* MSI-X AWID value. Same ID for all cause bits. */ +#define PCIE_AXI_INT_GRP_A_CTRL_AWID_MASK 0x00000F00 +#define PCIE_AXI_INT_GRP_A_CTRL_AWID_SHIFT 8 +/* This value determines the interval between interrupts */ +#define PCIE_AXI_INT_GRP_A_CTRL_MOD_INTV_MASK 0x00FF0000 +#define PCIE_AXI_INT_GRP_A_CTRL_MOD_INTV_SHIFT 16 +/* This value determines the Moderation_Timer_Clock speed */ +#define PCIE_AXI_INT_GRP_A_CTRL_MOD_RES_MASK 0x0F000000 +#define PCIE_AXI_INT_GRP_A_CTRL_MOD_RES_SHIFT 24 + +#ifdef __cplusplus +} +#endif + +#endif /* __AL_HAL_pcie_axi_REG_H */ + +/** @} end of ... group */ + + diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_interrupts.c b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_interrupts.c new file mode 100644 index 00000000000000..7c7fc231d33b39 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_interrupts.c @@ -0,0 +1,73 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
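/*
 * Group A interrupt behavior is programmed through the control word whose
 * fields are defined above. A minimal sketch of composing a typical value
 * (clear-on-read plus moderation settings); the helper name and chosen
 * bits are illustrative only:
 */
#include <stdint.h>
#include "al_hal_pcie_axi_reg.h"

static inline uint32_t pcie_axi_int_grp_a_ctrl_build(uint32_t mod_intv,
						     uint32_t mod_res)
{
	uint32_t val = PCIE_AXI_INT_GRP_A_CTRL_CLEAR_ON_READ;

	/* interval between interrupts, in moderation-timer ticks */
	val |= (mod_intv << PCIE_AXI_INT_GRP_A_CTRL_MOD_INTV_SHIFT) &
		PCIE_AXI_INT_GRP_A_CTRL_MOD_INTV_MASK;
	/* moderation timer clock resolution */
	val |= (mod_res << PCIE_AXI_INT_GRP_A_CTRL_MOD_RES_SHIFT) &
		PCIE_AXI_INT_GRP_A_CTRL_MOD_RES_MASK;
	return val;
}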
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +#include +#include "al_hal_pcie_interrupts.h" +#include "al_hal_pcie_regs.h" + +/* Enable PCIe controller interrupts */ +int al_pcie_ints_config(struct al_pcie_port *pcie_port) +{ + al_iofic_config(pcie_port->app_int_grp_a_base, 0, + INT_CONTROL_GRP_SET_ON_POSEDGE); + al_iofic_config(pcie_port->app_int_grp_b_base, 0, 0); + al_iofic_config(pcie_port->axi_int_grp_a_base, 0, 0); + + return 0; +} + +void al_pcie_app_int_grp_a_unmask(struct al_pcie_port *pcie_port, + uint32_t int_mask) +{ + al_iofic_unmask(pcie_port->app_int_grp_a_base, 0, int_mask); +} + +void al_pcie_app_int_grp_a_mask(struct al_pcie_port *pcie_port, + uint32_t int_mask) +{ + al_iofic_mask(pcie_port->app_int_grp_a_base, 0, int_mask); +} + +void al_pcie_app_int_grp_b_unmask(struct al_pcie_port *pcie_port, + uint32_t int_mask) +{ + al_iofic_unmask(pcie_port->app_int_grp_b_base, 0, int_mask); +} + +void al_pcie_app_int_grp_b_mask(struct al_pcie_port *pcie_port, + uint32_t int_mask) +{ + al_iofic_mask(pcie_port->app_int_grp_b_base, 0, int_mask); +} diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_interrupts.h b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_interrupts.h new file mode 100644 index 00000000000000..51cf905c7fb862 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_interrupts.h @@ -0,0 +1,157 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
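/*
 * A usage sketch for the helpers defined above: configure the three
 * interrupt groups, then unmask the cause bit of interest. It assumes an
 * already-initialized al_pcie_port handle; AL_PCIE_APP_INT_LINK_DOWN is
 * declared in al_hal_pcie_interrupts.h below:
 */
#include "al_hal_pcie_interrupts.h"

static int pcie_port_enable_link_down_int(struct al_pcie_port *pcie_port)
{
	int err = al_pcie_ints_config(pcie_port);

	if (err)
		return err;

	al_pcie_app_int_grp_a_unmask(pcie_port, AL_PCIE_APP_INT_LINK_DOWN);
	return 0;
}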
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +#ifndef _AL_HAL_PCIE_INTERRUPTS_H_ +#define _AL_HAL_PCIE_INTERRUPTS_H_ + +#include +#include +#include + +/** + * @defgroup group_pcie_interrupts PCIe interrupts + * @ingroup grouppcie + * @{ + * The PCIe interrupts HAL can be used to control PCIe unit interrupts. + * There are 3 groups of interrupts: app group A, app group B and AXI. + * + * @file al_hal_pcie_interrupts.h + * + */ + +/** App group A interrupts mask - don't change */ +enum al_pcie_app_int_grp_a { + AL_PCIE_APP_INT_DEASSERT_INTD = AL_BIT(0), + AL_PCIE_APP_INT_DEASSERT_INTC = AL_BIT(1), + AL_PCIE_APP_INT_DEASSERT_INTB = AL_BIT(2), + AL_PCIE_APP_INT_DEASSERT_INTA = AL_BIT(3), + AL_PCIE_APP_INT_ASSERT_INTD = AL_BIT(4), + AL_PCIE_APP_INT_ASSERT_INTC = AL_BIT(5), + AL_PCIE_APP_INT_ASSERT_INTB = AL_BIT(6), + AL_PCIE_APP_INT_ASSERT_INTA = AL_BIT(7), + AL_PCIE_APP_INT_MSI_CNTR_RCV_INT = AL_BIT(8), + AL_PCIE_APP_INT_MSI_TRNS_GNT = AL_BIT(9), + AL_PCIE_APP_INT_SYS_ERR_RC = AL_BIT(10), + AL_PCIE_APP_INT_FLR_PF_ACTIVE = AL_BIT(11), + AL_PCIE_APP_INT_AER_RC_ERR = AL_BIT(12), + AL_PCIE_APP_INT_AER_RC_ERR_MSI = AL_BIT(13), + AL_PCIE_APP_INT_WAKE = AL_BIT(14), + AL_PCIE_APP_INT_PME_INT = AL_BIT(15), + AL_PCIE_APP_INT_PME_MSI = AL_BIT(16), + AL_PCIE_APP_INT_HP_PME = AL_BIT(17), + AL_PCIE_APP_INT_HP_INT = AL_BIT(18), + AL_PCIE_APP_INT_HP_MSI = AL_BIT(19), + AL_PCIE_APP_INT_VPD_INT = AL_BIT(20), + AL_PCIE_APP_INT_LINK_DOWN = AL_BIT(21), + AL_PCIE_APP_INT_PM_XTLH_BLOCK_TLP = AL_BIT(22), + AL_PCIE_APP_INT_XMLH_LINK_UP = AL_BIT(23), + AL_PCIE_APP_INT_RDLH_LINK_UP = AL_BIT(24), + AL_PCIE_APP_INT_LTSSM_RCVRY_STATE = AL_BIT(25), + AL_PCIE_APP_INT_CFG_WR = AL_BIT(26), + AL_PCIE_APP_INT_CFG_EMUL = AL_BIT(31), +}; + +/** + * @brief Initialize and configure PCIe controller interrupts + * + * @param pcie_port pcie port handle + * + * @return 0 if no error found + */ +int al_pcie_ints_config(struct al_pcie_port *pcie_port); + +/** + * Unmask PCIe app group a interrupts + * + * @param pcie_port pcie port handle + * @param int_mask interrupt mask. + */ +void al_pcie_app_int_grp_a_unmask(struct al_pcie_port *pcie_port, + uint32_t int_mask); + +/** + * Mask PCIe app group a interrupts + * + * @param pcie_port pcie port handle + * @param int_mask interrupt mask. + */ +void al_pcie_app_int_grp_a_mask(struct al_pcie_port *pcie_port, + uint32_t int_mask); + +/** + * Unmask PCIe app group b interrupts + * + * @param pcie_port pcie port handle + * @param int_mask interrupt mask. + */ +void al_pcie_app_int_grp_b_unmask(struct al_pcie_port *pcie_port, + uint32_t int_mask); + +/** + * Mask PCIe app group b interrupts + * + * @param pcie_port pcie port handle + * @param int_mask interrupt mask. 
+ */ +void al_pcie_app_int_grp_b_mask(struct al_pcie_port *pcie_port, + uint32_t int_mask); + +/** + * Clear the PCIe app group a interrupt cause + * + * @param pcie_port pcie port handle + * @param int_cause interrupt cause register bits to clear + */ +static INLINE void al_pcie_app_int_grp_a_cause_clear( + struct al_pcie_port *pcie_port, + uint32_t int_cause) +{ + al_iofic_clear_cause(pcie_port->app_int_grp_a_base, 0, int_cause); +} + +/** + * Read PCIe app group a interrupt cause + * + * @param pcie_port pcie port handle + * @return interrupt cause mask + */ +static INLINE uint32_t al_pcie_app_int_grp_a_cause_read( + struct al_pcie_port *pcie_port) +{ + return al_iofic_read_cause(pcie_port->app_int_grp_a_base, 0); +} + +#endif +/** @} end of group_pcie_interrupts group */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_regs.h b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_regs.h new file mode 100644 index 00000000000000..7ac1c3e612a470 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_regs.h @@ -0,0 +1,182 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
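/*
 * A minimal interrupt-service sketch built on the inline helpers above:
 * read the group A cause, dispatch on the bits of interest, then clear
 * what was serviced. The handler body is illustrative:
 */
#include "al_hal_pcie_interrupts.h"

static void pcie_port_grp_a_isr(struct al_pcie_port *pcie_port)
{
	uint32_t cause = al_pcie_app_int_grp_a_cause_read(pcie_port);

	if (cause & AL_PCIE_APP_INT_LINK_DOWN) {
		/* schedule link recovery here */
	}

	/* clear the cause bits that were handled */
	al_pcie_app_int_grp_a_cause_clear(pcie_port, cause);
}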
+ +*******************************************************************************/ + +#ifndef __AL_HAL_PCIE_REGS_H__ +#define __AL_HAL_PCIE_REGS_H__ + +#include "al_hal_pcie_axi_reg.h" +#include "al_hal_pcie_w_reg.h" + +#define AL_PCIE_AXI_REGS_OFFSET 0x0 +#define AL_PCIE_APP_REGS_OFFSET 0x1000 +#define AL_PCIE_CORE_CONF_BASE_OFFSET 0x2000 + +struct al_pcie_core_iatu_regs { + uint32_t index; + uint32_t cr1; + uint32_t cr2; + uint32_t lower_base_addr; + uint32_t upper_base_addr; + uint32_t limit_addr; + uint32_t lower_target_addr; + uint32_t upper_target_addr; + uint32_t cr3; + uint32_t rsrvd[(0x270 - 0x224) >> 2]; +}; + +struct al_pcie_core_port_regs { + uint32_t ack_lat_rply_timer; + uint32_t reserved1[(0x10 - 0x4) >> 2]; + uint32_t port_link_ctrl; + uint32_t reserved2[(0x1c - 0x14) >> 2]; + uint32_t filter_mask_reg_1; + uint32_t reserved3[(0x48 - 0x20) >> 2]; + uint32_t vc0_posted_rcv_q_ctrl; + uint32_t vc0_non_posted_rcv_q_ctrl; + uint32_t vc0_comp_rcv_q_ctrl; + uint32_t reserved4[(0x10C - 0x54) >> 2]; + uint32_t gen2_ctrl; + uint32_t reserved5[(0x190 - 0x110) >> 2]; + uint32_t gen3_ctrl; + uint32_t gen3_eq_fs_lf; + uint32_t gen3_eq_preset_to_coef_map; + uint32_t gen3_eq_preset_idx; + uint32_t reserved6; + uint32_t gen3_eq_status; + uint32_t gen3_eq_ctrl; + uint32_t reserved7[(0x1B8 - 0x1AC) >> 2]; + uint32_t pipe_loopback_ctrl; + uint32_t rd_only_wr_en; + uint32_t reserved8[(0x1D0 - 0x1C0) >> 2]; + uint32_t axi_slave_err_resp; + uint32_t reserved9[(0x200 - 0x1D4) >> 2]; + struct al_pcie_core_iatu_regs iatu; + uint32_t reserved10[(0x448 - 0x270) >> 2]; +}; + +struct al_pcie_core_reg_space { + uint32_t config_header[0x40 >> 2]; + uint32_t pcie_pm_cap_base; + uint32_t reserved1[(0x70 - 0x44) >> 2]; + uint32_t pcie_cap_base; + uint32_t pcie_dev_cap_base; + uint32_t reserved2; + uint32_t pcie_link_cap_base; + uint32_t reserved3[(0xB0 - 0x80) >> 2]; + uint32_t msix_cap_base; + uint32_t reserved4[(0x100 - 0xB4) >> 2]; + uint32_t pcie_aer_cap_base; + uint32_t reserved5[(0x150 - 0x104) >> 2]; + uint32_t pcie_sec_ext_cap_base; + uint32_t reserved6[(0x700 - 0x154) >> 2]; + struct al_pcie_core_port_regs port_regs; +}; + +struct al_pcie_regs { + struct al_pcie_axi_regs __iomem axi; + uint32_t reserved1[(AL_PCIE_APP_REGS_OFFSET - + (AL_PCIE_AXI_REGS_OFFSET + + sizeof(struct al_pcie_axi_regs))) >> 2]; + struct al_pcie_w_regs __iomem app; + uint32_t reserved2[(AL_PCIE_CORE_CONF_BASE_OFFSET - + (AL_PCIE_APP_REGS_OFFSET + + sizeof(struct al_pcie_w_regs))) >> 2]; + struct al_pcie_core_reg_space core_space; +}; + +#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP 0 +#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC 4 + +#define PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT 18 +#define PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT 19 +#define PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT 20 + +#define PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT 9 +#define PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT 16 + +#define PCIE_PORT_GEN3_EQ_LF_SHIFT 0 +#define PCIE_PORT_GEN3_EQ_LF_MASK 0x3f +#define PCIE_PORT_GEN3_EQ_FS_SHIFT 6 +#define PCIE_PORT_GEN3_EQ_FS_MASK (0x3f << PCIE_PORT_GEN3_EQ_FS_SHIFT) + +#define PCIE_PORT_LINK_CTRL_LB_EN_SHIFT 2 +#define PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT 7 +#define PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT 31 + +#define PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT 0 + +/* filter_mask_reg_1 register */ +/* + * 0: Treat Function MisMatched TLPs as UR + * 1: Treat Function MisMatched TLPs as Supported + */ +#define CX_FLT_MASK_UR_FUNC_MISMATCH AL_BIT(16) + +/* + * 0: Treat CFG type1 TLPs as UR 
for EP; Supported for RC + * 1: Treat CFG type1 TLPs as Supported for EP; UR for RC + */ +#define CX_FLT_MASK_CFG_TYPE1_RE_AS_UR AL_BIT(19) + +/* + * 0: Enforce requester id match for received CPL TLPs. + * A violation results in cpl_abort, and possibly AER of unexp_cpl_err, + * cpl_rcvd_ur, cpl_rcvd_ca + * 1: Mask requester id match for received CPL TLPs + */ +#define CX_FLT_MASK_CPL_REQID_MATCH AL_BIT(22) + +/* + * 0: Enforce function match for received CPL TLPs. + * A violation results in cpl_abort, and possibly AER of unexp_cpl_err, + * cpl_rcvd_ur, cpl_rcvd_ca + * 1: Mask function match for received CPL TLPs + */ +#define CX_FLT_MASK_CPL_FUNC_MATCH AL_BIT(23) + +/* vc0_posted_rcv_q_ctrl register */ +#define RADM_PQ_HCRD_VC0_MASK AL_FIELD_MASK(19, 12) +#define RADM_PQ_HCRD_VC0_SHIFT 12 + +/* vc0_non_posted_rcv_q_ctrl register */ +#define RADM_NPQ_HCRD_VC0_MASK AL_FIELD_MASK(19, 12) +#define RADM_NPQ_HCRD_VC0_SHIFT 12 + +/* vc0_comp_rcv_q_ctrl register */ +#define RADM_CPLQ_HCRD_VC0_MASK AL_FIELD_MASK(19, 12) +#define RADM_CPLQ_HCRD_VC0_SHIFT 12 + +#endif + diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_w_reg.h b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_w_reg.h new file mode 100644 index 00000000000000..c75fc8e2f1ac05 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_pcie_w_reg.h @@ -0,0 +1,658 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
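/*
 * The reserved arrays in the structures above are sized arithmetically so
 * that each register block lands on its documented offset. A compile-time
 * sanity check is cheap; this sketch uses C11 _Static_assert and assumes
 * the kernel's empty __iomem annotation is in scope:
 */
#include <stddef.h>
#include "al_hal_pcie_regs.h"

_Static_assert(offsetof(struct al_pcie_regs, app) ==
	       AL_PCIE_APP_REGS_OFFSET,
	       "app registers must sit at 0x1000");
_Static_assert(offsetof(struct al_pcie_regs, core_space) ==
	       AL_PCIE_CORE_CONF_BASE_OFFSET,
	       "core config space must sit at 0x2000");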
+ +*******************************************************************************/ + + +#ifndef __AL_HAL_PCIE_W_REG_H__ +#define __AL_HAL_PCIE_W_REG_H__ + +#ifdef __cplusplus +extern "C" { +#endif +/* +* Unit Registers +*/ + + + +struct al_pcie_w_global_ctrl { + /* [0x0] */ + uint32_t port_init; + /* [0x4] */ + uint32_t port_status; + /* [0x8] */ + uint32_t pm_control; + uint32_t rsrvd_0; + /* [0x10] */ + uint32_t events_gen; + uint32_t rsrvd[3]; +}; +struct al_pcie_w_lcl_log { + uint32_t rsrvd_0[4]; + /* [0x10] */ + uint32_t cpl_to_info; + uint32_t rsrvd_1[3]; + /* [0x20] */ + uint32_t rcv_msg0_0; + /* [0x24] */ + uint32_t rcv_msg0_1; + /* [0x28] */ + uint32_t rcv_msg0_2; + uint32_t rsrvd_2; + /* [0x30] */ + uint32_t rcv_msg1_0; + /* [0x34] */ + uint32_t rcv_msg1_1; + /* [0x38] */ + uint32_t rcv_msg1_2; + uint32_t rsrvd_3; + /* [0x40] */ + uint32_t core_q_status; + uint32_t rsrvd[7]; +}; +struct al_pcie_w_debug { + /* [0x0] */ + uint32_t info_0; + /* [0x4] */ + uint32_t info_1; + /* [0x8] */ + uint32_t info_2; + uint32_t rsrvd; +}; +struct al_pcie_w_ob_ven_msg { + /* [0x0] */ + uint32_t control; + /* [0x4] */ + uint32_t param_1; + /* [0x8] */ + uint32_t param_2; + /* [0xc] */ + uint32_t data_high; + uint32_t rsrvd_0; + /* [0x14] */ + uint32_t data_low; + uint32_t rsrvd[2]; +}; +struct al_pcie_w_soc_int { + /* [0x0] */ + uint32_t status_0; + /* [0x4] */ + uint32_t status_1; + /* [0x8] */ + uint32_t status_2; + /* [0xc] */ + uint32_t mask_inta_leg_0; + /* [0x10] */ + uint32_t mask_inta_leg_1; + /* [0x14] */ + uint32_t mask_inta_leg_2; + /* [0x18] */ + uint32_t mask_msi_leg_0; + /* [0x1c] */ + uint32_t mask_msi_leg_1; + /* [0x20] */ + uint32_t mask_msi_leg_2; + /* [0x24] */ + uint32_t msi_leg_cntl; +}; +struct al_pcie_w_link_down { + /* [0x0] */ + uint32_t reset_delay; + /* [0x4] */ + uint32_t reset_extend_rsrvd; +}; +struct al_pcie_w_cntl_gen { + /* [0x0] */ + uint32_t features; +}; +struct al_pcie_w_parity { + /* [0x0] */ + uint32_t en_core; + /* [0x4] */ + uint32_t status_core; +}; +struct al_pcie_w_last_wr { + /* [0x0] */ + uint32_t cfg_addr; +}; +struct al_pcie_w_atu { + /* [0x0] */ + uint32_t in_mask_pair[6]; + /* [0x18] */ + uint32_t out_mask_pair[6]; +}; +struct al_pcie_w_cfg_elbi { + /* [0x0] */ + uint32_t emulation; +}; +struct al_pcie_w_emulatecfg { + /* [0x0] */ + uint32_t data; + /* [0x4] */ + uint32_t addr; + /* [0x8] */ + uint32_t cmpl; +}; +struct al_pcie_w_int_grp_a { + /* [0x0] Interrupt Cause RegisterSet by hardware - If MSI-X ... */ + uint32_t cause_a; + uint32_t rsrvd_0; + /* [0x8] Interrupt Cause Set RegisterWriting 1 to a bit in t ... */ + uint32_t cause_set_a; + uint32_t rsrvd_1; + /* [0x10] Interrupt Mask RegisterIf Auto-mask control bit =TR ... */ + uint32_t mask_a; + uint32_t rsrvd_2; + /* [0x18] Interrupt Mask Clear RegisterUsed when auto-mask co ... */ + uint32_t mask_clear_a; + uint32_t rsrvd_3; + /* [0x20] Interrupt Status RegisterThis register latches the ... */ + uint32_t status_a; + uint32_t rsrvd_4; + /* [0x28] Interrupt Control Register */ + uint32_t control_a; + uint32_t rsrvd_5; + /* [0x30] Interrupt Mask RegisterEach bit in this register ma ... */ + uint32_t abort_mask_a; + uint32_t rsrvd_6; + /* [0x38] Interrupt Log RegisterEach bit in this register mas ... */ + uint32_t log_mask_a; + uint32_t rsrvd; +}; +struct al_pcie_w_int_grp_b { + /* [0x0] Interrupt Cause RegisterSet by hardware- If MSI-X i ... */ + uint32_t cause_b; + uint32_t rsrvd_0; + /* [0x8] Interrupt Cause Set RegisterWriting 1 to a bit in t ... 
*/ + uint32_t cause_set_b; + uint32_t rsrvd_1; + /* [0x10] Interrupt Mask RegisterIf Auto-mask control bit =TR ... */ + uint32_t mask_b; + uint32_t rsrvd_2; + /* [0x18] Interrupt Mask Clear RegisterUsed when auto-mask co ... */ + uint32_t mask_clear_b; + uint32_t rsrvd_3; + /* [0x20] Interrupt Status RegisterThis register latches the ... */ + uint32_t status_b; + uint32_t rsrvd_4; + /* [0x28] Interrupt Control Register */ + uint32_t control_b; + uint32_t rsrvd_5; + /* [0x30] Interrupt Mask RegisterEach bit in this register ma ... */ + uint32_t abort_mask_b; + uint32_t rsrvd_6; + /* [0x38] Interrupt Log RegisterEach bit in this register mas ... */ + uint32_t log_mask_b; + uint32_t rsrvd; +}; + +struct al_pcie_w_regs { + struct al_pcie_w_global_ctrl global_ctrl; /* [0x0] */ + struct al_pcie_w_lcl_log lcl_log; /* [0x20] */ + struct al_pcie_w_debug debug; /* [0x80] */ + struct al_pcie_w_ob_ven_msg ob_ven_msg; /* [0x90] */ + uint32_t rsrvd_0[84]; + struct al_pcie_w_soc_int soc_int; /* [0x200] */ + struct al_pcie_w_link_down link_down; /* [0x228] */ + struct al_pcie_w_cntl_gen ctrl_gen; /* [0x230] */ + struct al_pcie_w_parity parity; /* [0x234] */ + struct al_pcie_w_last_wr last_wr; /* [0x23c] */ + struct al_pcie_w_atu atu; /* [0x240] */ + struct al_pcie_w_cfg_elbi cfg_elbi; /* [0x270] */ + struct al_pcie_w_emulatecfg emulatecfg; /* [0x274] */ + uint32_t rsrvd_1[32]; + struct al_pcie_w_int_grp_a int_grp_a_m0; /* [0x300] */ + struct al_pcie_w_int_grp_b int_grp_b_m0; /* [0x340] */ + uint32_t rsrvd_2[32]; + struct al_pcie_w_int_grp_a int_grp_a; /* [0x400] */ + struct al_pcie_w_int_grp_b int_grp_b; /* [0x440] */ +}; + + +/* +* Registers Fields +*/ + + +/**** Port_Init register ****/ +/* Enable port to start LTSSM Link Training */ +#define PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN (1 << 0) +/* Device TypeIndicates the specific type of this PCIe Function */ +#define PCIE_W_GLOBAL_CTRL_PORT_INIT_DEVICE_TYPE_MASK 0x000000F0 +#define PCIE_W_GLOBAL_CTRL_PORT_INIT_DEVICE_TYPE_SHIFT 4 +/* Performs Manual Lane reversal for transmit Lanes */ +#define PCIE_W_GLOBAL_CTRL_PORT_INIT_TX_LANE_FLIP_EN (1 << 8) +/* Performs Manual Lane reversal for receive Lanes */ +#define PCIE_W_GLOBAL_CTRL_PORT_INIT_RX_LANE_FLIP_EN (1 << 9) +/* Auxiliary Power DetectedIndicates that auxiliary power (Vaux) ... */ +#define PCIE_W_GLOBAL_CTRL_PORT_INIT_SYS_AUX_PWR_DET_NOT_USE (1 << 10) + +/**** Port_Status register ****/ +/* PHY Link up/down indicator */ +#define PCIE_W_GLOBAL_CTRL_PORT_STS_PHY_LINK_UP (1 << 0) +/* Data Link Layer up/down indicatorThis status from the Flow Co ... */ +#define PCIE_W_GLOBAL_CTRL_PORT_STS_DL_LINK_UP (1 << 1) +/* Reset request due to link down status. */ +#define PCIE_W_GLOBAL_CTRL_PORT_STS_LINK_REQ_RST (1 << 2) +/* Power management is in L0s state.. */ +#define PCIE_W_GLOBAL_CTRL_PORT_STS_PM_LINKST_IN_L0S (1 << 3) +/* Power management is in L1 state. */ +#define PCIE_W_GLOBAL_CTRL_PORT_STS_PM_LINKST_IN_L1 (1 << 4) +/* Power management is in L2 state. */ +#define PCIE_W_GLOBAL_CTRL_PORT_STS_PM_LINKST_IN_L2 (1 << 5) +/* Power management is exiting L2 state. */ +#define PCIE_W_GLOBAL_CTRL_PORT_STS_PM_LINKST_L2_EXIT (1 << 6) +/* Power state of the device. 
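/*
 * A bring-up sketch using the Port_Init/Port_Status fields above: select
 * the device type, kick the LTSSM, then check for PHY and data-link up.
 * reg_read32/reg_write32 are illustrative stand-ins for the platform's
 * MMIO accessors, and a real caller would poll the status with a timeout:
 */
#include <stdint.h>
#include "al_hal_pcie_w_reg.h"

static inline uint32_t reg_read32(const volatile uint32_t *r) { return *r; }
static inline void reg_write32(volatile uint32_t *r, uint32_t v) { *r = v; }

static int pcie_w_link_start(struct al_pcie_w_regs *regs, uint32_t dev_type)
{
	uint32_t val = reg_read32(&regs->global_ctrl.port_init);

	val &= ~PCIE_W_GLOBAL_CTRL_PORT_INIT_DEVICE_TYPE_MASK;
	val |= (dev_type << PCIE_W_GLOBAL_CTRL_PORT_INIT_DEVICE_TYPE_SHIFT) &
		PCIE_W_GLOBAL_CTRL_PORT_INIT_DEVICE_TYPE_MASK;
	/* enable the port to start LTSSM link training */
	val |= PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN;
	reg_write32(&regs->global_ctrl.port_init, val);

	val = reg_read32(&regs->global_ctrl.port_status);
	return (val & (PCIE_W_GLOBAL_CTRL_PORT_STS_PHY_LINK_UP |
		       PCIE_W_GLOBAL_CTRL_PORT_STS_DL_LINK_UP)) ? 0 : -1;
}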
*/ +#define PCIE_W_GLOBAL_CTRL_PORT_STS_PM_DSTATE_MASK 0x00000380 +#define PCIE_W_GLOBAL_CTRL_PORT_STS_PM_DSTATE_SHIFT 7 +/* Los state */ +#define PCIE_W_GLOBAL_CTRL_PORT_STS_XMLH_IN_RL0S (1 << 10) +/* Timeout count before flush */ +#define PCIE_W_GLOBAL_CTRL_PORT_STS_LINK_TOUT_FLUSH_NOT (1 << 11) +/* Clock Turnoff RequestAllows clock generation module to turn o ... */ +#define PCIE_W_GLOBAL_CTRL_PORT_STS_CORE_CLK_REQ_N (1 << 31) + +/**** PM_Control register ****/ +/* Wake Up */ +#define PCIE_W_GLOBAL_CTRL_PM_CONTROL_PM_XMT_PME (1 << 0) +/* Request to Enter ASPM L1 */ +#define PCIE_W_GLOBAL_CTRL_PM_CONTROL_REQ_ENTR_L1 (1 << 3) +/* Request to exit ASPM L1. +Only effective if L1 is enabled. */ +#define PCIE_W_GLOBAL_CTRL_PM_CONTROL_REQ_EXIT_L1 (1 << 4) +/* Indication that component is ready to enter the L23 state */ +#define PCIE_W_GLOBAL_CTRL_PM_CONTROL_READY_ENTR_L23 (1 << 5) +/* Request to generate a PM_Turn_Off Message to communicate tra ... */ +#define PCIE_W_GLOBAL_CTRL_PM_CONTROL_PM_XMT_TURNOFF (1 << 6) +/* Provides a capability to defer incoming Configuration Request ... */ +#define PCIE_W_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN (1 << 7) +/* Core core gate enableIf set, core_clk is gated off whenever a ... */ +#define PCIE_W_GLOBAL_CTRL_PM_CONTROL_CORE_CLK_GATE (1 << 31) + +/**** Events_Gen register ****/ +/* INT_D. Not supported */ +#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_ASSERT_INTD (1 << 0) +/* INT_C. Not supported */ +#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_ASSERT_INTC (1 << 1) +/* INT_B. Not supported */ +#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_ASSERT_INTB (1 << 2) +/* Transmit INT_A Interrupt ControlEvery transition from 0 to 1 ... */ +#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_ASSERT_INTA (1 << 3) +/* A request to generate an outbound MSI interrupt when MSI is e ... */ +#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_TRNS_REQ (1 << 4) +/* Set the MSI vector before issuing msi_trans_req. */ +#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_MASK 0x000003E0 +#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_SHIFT 5 +/* The application requests hot reset to a downstream device */ +#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT (1 << 10) +/* The application request unlock message to be sent */ +#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_UNLOCK_GEN (1 << 30) +/* Indicates that FLR on a Physical Function has been completed */ +#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_FLR_PF_DONE (1 << 31) + +/**** Cpl_TO_Info register ****/ +/* The Traffic Class of the timed out CPL */ +#define PCIE_W_LCL_LOG_CPL_TO_INFO_TC_MASK 0x00000003 +#define PCIE_W_LCL_LOG_CPL_TO_INFO_TC_SHIFT 0 +/* Indicates which Virtual Function (VF) had a CPL timeout */ +#define PCIE_W_LCL_LOG_CPL_TO_INFO_FUN_NUM_MASK 0x000000FC +#define PCIE_W_LCL_LOG_CPL_TO_INFO_FUN_NUM_SHIFT 2 +/* The Tag field of the timed out CPL */ +#define PCIE_W_LCL_LOG_CPL_TO_INFO_TAG_MASK 0x0000FF00 +#define PCIE_W_LCL_LOG_CPL_TO_INFO_TAG_SHIFT 8 +/* The Attributes field of the timed out CPL */ +#define PCIE_W_LCL_LOG_CPL_TO_INFO_ATTR_MASK 0x00030000 +#define PCIE_W_LCL_LOG_CPL_TO_INFO_ATTR_SHIFT 16 +/* The Len field of the timed out CPL */ +#define PCIE_W_LCL_LOG_CPL_TO_INFO_LEN_MASK 0x3FFC0000 +#define PCIE_W_LCL_LOG_CPL_TO_INFO_LEN_SHIFT 18 +/* Write 1 to this field to clear the information logged in the ... 
*/ +#define PCIE_W_LCL_LOG_CPL_TO_INFO_VALID (1 << 31) + +/**** Rcv_Msg0_0 register ****/ +/* The Requester ID of the received message */ +#define PCIE_W_LCL_LOG_RCV_MSG0_0_REQ_ID_MASK 0x0000FFFF +#define PCIE_W_LCL_LOG_RCV_MSG0_0_REQ_ID_SHIFT 0 +/* Valid logged messageWriting 1 to this bit enables new message ... */ +#define PCIE_W_LCL_LOG_RCV_MSG0_0_VALID (1 << 31) + +/**** Rcv_Msg1_0 register ****/ +/* The Requester ID of the received message */ +#define PCIE_W_LCL_LOG_RCV_MSG1_0_REQ_ID_MASK 0x0000FFFF +#define PCIE_W_LCL_LOG_RCV_MSG1_0_REQ_ID_SHIFT 0 +/* Valid logged messageWriting 1 to this bit enables new message ... */ +#define PCIE_W_LCL_LOG_RCV_MSG1_0_VALID (1 << 31) + +/**** Core_Queues_Status register ****/ +/* Indicates which entries in the CPL lookup tablehave valid ent ... */ +#define PCIE_W_LCL_LOG_CORE_Q_STATUS_CPL_LUT_VALID_MASK 0x0000FFFF +#define PCIE_W_LCL_LOG_CORE_Q_STATUS_CPL_LUT_VALID_SHIFT 0 + +/**** Debug_Info_0 register ****/ +/* Indicates the current power state */ +#define PCIE_W_DEBUG_INFO_0_PM_CURRENT_STATE_MASK 0x00000007 +#define PCIE_W_DEBUG_INFO_0_PM_CURRENT_STATE_SHIFT 0 +/* Current state of the LTSSM */ +#define PCIE_W_DEBUG_INFO_0_LTSSM_STATE_MASK 0x000001F8 +#define PCIE_W_DEBUG_INFO_0_LTSSM_STATE_SHIFT 3 +/* Decode of the Recovery. Equalization LTSSM state */ +#define PCIE_W_DEBUG_INFO_0_LTSSM_STATE_RCVRY_EQ (1 << 9) + +/**** control register ****/ +/* Indication to send vendor message; when clear the message was ... */ +#define PCIE_W_OB_VEN_MSG_CONTROL_REQ (1 << 0) + +/**** param_1 register ****/ +/* Vendor message parameters */ +#define PCIE_W_OB_VEN_MSG_PARAM_1_FMT_MASK 0x00000003 +#define PCIE_W_OB_VEN_MSG_PARAM_1_FMT_SHIFT 0 +/* Vendor message parameters */ +#define PCIE_W_OB_VEN_MSG_PARAM_1_TYPE_MASK 0x0000007C +#define PCIE_W_OB_VEN_MSG_PARAM_1_TYPE_SHIFT 2 +/* Vendor message parameters */ +#define PCIE_W_OB_VEN_MSG_PARAM_1_TC_MASK 0x00000380 +#define PCIE_W_OB_VEN_MSG_PARAM_1_TC_SHIFT 7 +/* Vendor message parameters */ +#define PCIE_W_OB_VEN_MSG_PARAM_1_TD (1 << 10) +/* Vendor message parameters */ +#define PCIE_W_OB_VEN_MSG_PARAM_1_EP (1 << 11) +/* Vendor message parameters */ +#define PCIE_W_OB_VEN_MSG_PARAM_1_ATTR_MASK 0x00003000 +#define PCIE_W_OB_VEN_MSG_PARAM_1_ATTR_SHIFT 12 +/* Vendor message parameters */ +#define PCIE_W_OB_VEN_MSG_PARAM_1_LEN_MASK 0x00FFC000 +#define PCIE_W_OB_VEN_MSG_PARAM_1_LEN_SHIFT 14 +/* Vendor message parameters */ +#define PCIE_W_OB_VEN_MSG_PARAM_1_TAG_MASK 0xFF000000 +#define PCIE_W_OB_VEN_MSG_PARAM_1_TAG_SHIFT 24 + +/**** param_2 register ****/ +/* Vendor message parameters */ +#define PCIE_W_OB_VEN_MSG_PARAM_2_REQ_ID_MASK 0x0000FFFF +#define PCIE_W_OB_VEN_MSG_PARAM_2_REQ_ID_SHIFT 0 +/* Vendor message parameters */ +#define PCIE_W_OB_VEN_MSG_PARAM_2_CODE_MASK 0x00FF0000 +#define PCIE_W_OB_VEN_MSG_PARAM_2_CODE_SHIFT 16 +/* Vendor message parameters */ +#define PCIE_W_OB_VEN_MSG_PARAM_2_RSVD_31_24_MASK 0xFF000000 +#define PCIE_W_OB_VEN_MSG_PARAM_2_RSVD_31_24_SHIFT 24 + +/**** features register ****/ +/* Enable MSI fix from the SATA to the PCIe EP - Only valid for port zero */ +#define PCIE_W_CTRL_GEN_FEATURES_SATA_EP_MSI_FIX AL_BIT(16) + +/**** in/out_mask_x_y register ****/ +/* When bit [i] set to 1 it maks the compare in the atu_in/out wind ... */ +#define PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_MASK 0x0000FFFF +#define PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_SHIFT 0 +/* When bit [i] set to 1 it maks the compare in the atu_in/out wind ... 
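/*
 * A decode sketch for the completion-timeout log above: check VALID,
 * extract the tag and function of the timed-out completion, then write
 * VALID back to clear and re-arm the log. reg_read32/reg_write32 are
 * illustrative MMIO stand-ins:
 */
#include <stdint.h>
#include "al_hal_pcie_w_reg.h"

static inline uint32_t reg_read32(const volatile uint32_t *r) { return *r; }
static inline void reg_write32(volatile uint32_t *r, uint32_t v) { *r = v; }

static void pcie_w_cpl_timeout_dump(struct al_pcie_w_regs *regs)
{
	uint32_t info = reg_read32(&regs->lcl_log.cpl_to_info);
	uint32_t tag, fun;

	if (!(info & PCIE_W_LCL_LOG_CPL_TO_INFO_VALID))
		return;

	tag = (info & PCIE_W_LCL_LOG_CPL_TO_INFO_TAG_MASK) >>
		PCIE_W_LCL_LOG_CPL_TO_INFO_TAG_SHIFT;
	fun = (info & PCIE_W_LCL_LOG_CPL_TO_INFO_FUN_NUM_MASK) >>
		PCIE_W_LCL_LOG_CPL_TO_INFO_FUN_NUM_SHIFT;
	(void)tag;
	(void)fun;

	/* writing 1 to VALID clears the logged information */
	reg_write32(&regs->lcl_log.cpl_to_info,
		    PCIE_W_LCL_LOG_CPL_TO_INFO_VALID);
}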
*/
+#define PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_MASK 0xFFFF0000
+#define PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_SHIFT 16
+
+/* emulation register */
+
+/*
+ * Force all inbound PF0 configuration reads to ELBI (emulation interface)
+ */
+#define PCIE_W_CFG_EMUL_CTRL_FORCE_FUN_0_CFG_ELBI AL_BIT(0)
+/*
+ * Force all non-PF0 inbound configuration reads to ELBI
+ */
+#define PCIE_W_CFG_EMUL_CTRL_FORCE_FUN_NO0_CFG_ELBI AL_BIT(1)
+/*
+ * Enable direct connection between DBI and CDM.
+ * By default, the local CPU cannot access the PCIe Core Configuration Space
+ * (CDM) through the DBI interface while an inbound configuration read or
+ * write is pending.
+ * In emulation mode, since the inbound configuration is stalled and the CPU
+ * must access the core configuration space before releasing the inbound
+ * configuration transaction, a direct path to the CPU must be enabled.
+ */
+#define PCIE_W_CFG_EMUL_CTRL_EMUL_DBI_FORCE_CDM_EN AL_BIT(2)
+/*
+ * Disable config direction to trgt1 if above CONFIG_LIMIT,
+ * i.e. direct all inbound configuration accesses to the emulation interface
+ */
+#define PCIE_W_CFG_EMUL_CTRL_EMULCFG_ABOVE_LIMIT_DIS AL_BIT(3)
+/*
+ * ARI emulation enable; this emulates an 8-bit function number instead of 3.
+ * Since our PCIe core does not have the ARI capability and is single
+ * function, requester and completer IDs are in the form of {bus, dev, fun},
+ * where bus and dev numbers are latched from the received configuration
+ * write. When this bit is set, the dev number is overridden by the function
+ * number when function >= 8.
+ */
+#define PCIE_W_CFG_EMUL_CTRL_AP_ARI_EMUL_EN AL_BIT(8)
+/*
+ * Disable all FLR functionality within the core for both PF and VF. By
+ * default the core resets internal data structures and terminates pending
+ * requests. Since all the resources are shared by all functions, it is
+ * not correct to apply FLR on the core. When this bit is set, FLR is
+ * propagated as a configuration write to emulation, and the emulation
+ * driver should handle it in software.
+ */
+#define PCIE_W_CFG_EMUL_CTRL_EMULCFG_PFVF_FLR_DIS AL_BIT(9)
+/*
+ * Disable FLR functionality for func != 0 within the core
+ */
+#define PCIE_W_CFG_EMUL_CTRL_EMULCFG_VF_FLR_DIS AL_BIT(10)
+/*
+ * Enable multi-function (VMID) propagation for outbound requests.
+ */
+#define PCIE_W_CFG_EMUL_CTRL_SRVIO_VFUNC_EN AL_BIT(16)
+/*
+ * Fix client1 FMT bits after cutting address bits 63:56; fix the address
+ * format to 32 bits if the original request uses a 32-bit address.
+ */
+#define PCIE_W_CFG_EMUL_CTRL_FIX_CLIENT1_FMT_EN AL_BIT(17)
+
+/* address register */
+
+/* Valid address - cleared on read */
+#define PCIE_W_CFG_EMUL_ADDR_VALID AL_BIT(0)
+
+/* Received Configuration Type: CfgType0 (=0) or CfgType1 (=1) */
+#define PCIE_W_CFG_EMUL_ADDR_CFG_TYPE AL_BIT(1)
+#define PCIE_W_CFG_EMUL_ADDR_CFG_TYPE_0 0
+#define PCIE_W_CFG_EMUL_ADDR_CFG_TYPE_1 AL_BIT(1)
+
+/* Target register offset (including extended register) */
+#define PCIE_W_CFG_EMUL_ADDR_REG_OFFSET_MASK AL_FIELD_MASK(11, 2)
+#define PCIE_W_CFG_EMUL_ADDR_REG_OFFSET_SHIFT 2
+
+/*
+ * Received Byte Enable.
+ * If 4'b0000, the received packet is a Configuration Read transaction;
+ * otherwise it is a Configuration Write with the corresponding 4-bit
+ * Byte Enable.
+ */ +#define PCIE_W_CFG_EMUL_ADDR_BYTE_ENABLE_MASK AL_FIELD_MASK(15, 12) +#define PCIE_W_CFG_EMUL_ADDR_BYTE_ENABLE_SHIFT 12 + +/* + * Dev_Fun + * - Non-ARI: [19:16] target function num, [23:19] target device number + * - ARI: [23:16] target function number + */ +#define PCIE_W_CFG_EMUL_ADDR_DEV_FUN_MASK AL_FIELD_MASK(23, 16) +#define PCIE_W_CFG_EMUL_ADDR_DEV_FUN_SHIFT 16 + +/* Target Bus Number */ +#define PCIE_W_CFG_EMUL_ADDR_BUS_NUM_MASK AL_FIELD_MASK(31, 24) +#define PCIE_W_CFG_EMUL_ADDR_BUS_NUM_SHIFT 24 + +/**** cause_A register ****/ +/* Deassert_INTD received. Write zero to clear this bit. */ +#define PCIE_W_INT_GRP_A_CAUSE_A_DEASSERT_INTD (1 << 0) +/* Deassert_INTC received. Write zero to clear this bit. */ +#define PCIE_W_INT_GRP_A_CAUSE_A_DEASSERT_INTC (1 << 1) +/* Deassert_INTB received. Write zero to clear this bit. */ +#define PCIE_W_INT_GRP_A_CAUSE_A_DEASSERT_INTB (1 << 2) +/* Deassert_INTA received. Write zero to clear this bit. */ +#define PCIE_W_INT_GRP_A_CAUSE_A_DEASSERT_INTA (1 << 3) +/* Assert_INTD received. Write zero to clear this bit. */ +#define PCIE_W_INT_GRP_A_CAUSE_A_ASSERT_INTD (1 << 4) +/* Assert_INTC received. Write zero to clear this bit. */ +#define PCIE_W_INT_GRP_A_CAUSE_A_ASSERT_INTC (1 << 5) +/* Assert_INTC received. Write zero to clear this bit. */ +#define PCIE_W_INT_GRP_A_CAUSE_A_ASSERT_INTB (1 << 6) +/* Assert_INTA received. Write zero to clear this bit. */ +#define PCIE_W_INT_GRP_A_CAUSE_A_ASSERT_INTA (1 << 7) +/* MSI Controller InterruptMSI interrupt is being received */ +#define PCIE_W_INT_GRP_A_CAUSE_A_MSI_CNTR_RCV_INT (1 << 8) +/* MSI sent grant. Write zero to clear this bit. */ +#define PCIE_W_INT_GRP_A_CAUSE_A_MSI_TRNS_GNT (1 << 9) +/* System error detected Indicates if any device in the hierarch ... */ +#define PCIE_W_INT_GRP_A_CAUSE_A_SYS_ERR_RC (1 << 10) +/* Set when software initiates FLR on a Physical Function by wri ... */ +#define PCIE_W_INT_GRP_A_CAUSE_A_FLR_PF_ACTIVE (1 << 11) +/* Reported error condition causes a bit to be set in the Root E ... */ +#define PCIE_W_INT_GRP_A_CAUSE_A_AER_RC_ERR (1 << 12) +/* The core asserts aer_rc_err_msi when all of the following con ... */ +#define PCIE_W_INT_GRP_A_CAUSE_A_AER_RC_ERR_MSI (1 << 13) +/* Wake Up */ +#define PCIE_W_INT_GRP_A_CAUSE_A_WAKE (1 << 14) +/* The core asserts cfg_pme_int when all of the following condit ... */ +#define PCIE_W_INT_GRP_A_CAUSE_A_PME_INT (1 << 15) +/* The core asserts cfg_pme_msi when all of the following condit ... */ +#define PCIE_W_INT_GRP_A_CAUSE_A_PME_MSI (1 << 16) +/* The core asserts hp_pme when all of the following conditions ... */ +#define PCIE_W_INT_GRP_A_CAUSE_A_HP_PME (1 << 17) +/* The core asserts hp_int when all of the following conditions ... */ +#define PCIE_W_INT_GRP_A_CAUSE_A_HP_INT (1 << 18) +/* The core asserts hp_msi when the logical AND of the followin ... */ +#define PCIE_W_INT_GRP_A_CAUSE_A_HP_MSI (1 << 19) +/* Read VPD registers notification */ +#define PCIE_W_INT_GRP_A_CAUSE_A_VPD_INT (1 << 20) +/* The core assert link down event, whenever the link is going d ... */ +#define PCIE_W_INT_GRP_A_CAUSE_A_LINK_DOWN_EVENT (1 << 21) +/* When the EP gets a command to shut down, signal the software ... */ +#define PCIE_W_INT_GRP_A_CAUSE_A_PM_XTLH_BLOCK_TLP (1 << 22) +/* PHY/MAC link up */ +#define PCIE_W_INT_GRP_A_CAUSE_A_XMLH_LINK_UP (1 << 23) +/* Data link up */ +#define PCIE_W_INT_GRP_A_CAUSE_A_RDLH_LINK_UP (1 << 24) +/* The ltssm is in RCVRY_LOCK state. 
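/*
 * A decode sketch for the emulation address word above: split it into
 * bus, dev/fun, register offset, and byte enables, then classify the
 * request. The offset field covers bits [11:2] and is therefore a dword
 * index; shift left by 2 for a byte offset. The request struct is
 * illustrative:
 */
#include <stdint.h>
#include "al_hal_pcie_w_reg.h"

struct pcie_w_emul_req {
	uint32_t bus;
	uint32_t dev_fun;
	uint32_t byte_off;
	uint32_t byte_en;
	int is_write;
};

static void pcie_w_emul_addr_decode(uint32_t addr, struct pcie_w_emul_req *req)
{
	req->byte_off = ((addr & PCIE_W_CFG_EMUL_ADDR_REG_OFFSET_MASK) >>
			 PCIE_W_CFG_EMUL_ADDR_REG_OFFSET_SHIFT) << 2;
	req->byte_en = (addr & PCIE_W_CFG_EMUL_ADDR_BYTE_ENABLE_MASK) >>
			PCIE_W_CFG_EMUL_ADDR_BYTE_ENABLE_SHIFT;
	req->dev_fun = (addr & PCIE_W_CFG_EMUL_ADDR_DEV_FUN_MASK) >>
			PCIE_W_CFG_EMUL_ADDR_DEV_FUN_SHIFT;
	req->bus = (addr & PCIE_W_CFG_EMUL_ADDR_BUS_NUM_MASK) >>
			PCIE_W_CFG_EMUL_ADDR_BUS_NUM_SHIFT;
	/* all-zero byte enables denote a configuration read */
	req->is_write = (req->byte_en != 0);
}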
*/ +#define PCIE_W_INT_GRP_A_CAUSE_A_LTSSM_RCVRY_STATE (1 << 25) +/* Config write transaction to the config space by the RC peer, ... */ +#define PCIE_W_INT_GRP_A_CAUSE_A_CFG_WR_EVENT (1 << 26) +/* When emulation mode is active, every cfg access in EP mode will cause INT. */ +#define PCIE_W_INT_GRP_A_CAUSE_A_CFG_EMUL_EVENT (1 << 31) + +/**** control_A register ****/ +/* When Clear_on_Read =1, all bits of Cause register are cleare ... */ +#define PCIE_W_INT_GRP_A_CONTROL_A_CLEAR_ON_READ (1 << 0) +/* (Must be set only when MSIX is enabled */ +#define PCIE_W_INT_GRP_A_CONTROL_A_AUTO_MASK (1 << 1) +/* Auto_Clear (RW)When Auto-Clear =1, the bits in the Interrupt ... */ +#define PCIE_W_INT_GRP_A_CONTROL_A_AUTO_CLEAR (1 << 2) +/* When Set_on_Posedge =1, the bits in the Interrupt Cause regis ... */ +#define PCIE_W_INT_GRP_A_CONTROL_A_SET_ON_POSEDGE (1 << 3) +/* When Moderation_Reset =1, all Moderation timers associated wi ... */ +#define PCIE_W_INT_GRP_A_CONTROL_A_MOD_RST (1 << 4) +/* When mask_msi_x =1, no MSI-X from this group is sent */ +#define PCIE_W_INT_GRP_A_CONTROL_A_MASK_MSI_X (1 << 5) +/* MSI-X AWID value. Same ID for all cause bits. */ +#define PCIE_W_INT_GRP_A_CONTROL_A_AWID_MASK 0x00000F00 +#define PCIE_W_INT_GRP_A_CONTROL_A_AWID_SHIFT 8 +/* This value determines the interval between interrupts; writin ... */ +#define PCIE_W_INT_GRP_A_CONTROL_A_MOD_INTV_MASK 0x00FF0000 +#define PCIE_W_INT_GRP_A_CONTROL_A_MOD_INTV_SHIFT 16 +/* This value determines the Moderation_Timer_Clock speed */ +#define PCIE_W_INT_GRP_A_CONTROL_A_MOD_RES_MASK 0x0F000000 +#define PCIE_W_INT_GRP_A_CONTROL_A_MOD_RES_SHIFT 24 + +/**** cause_B register ****/ +/* Indicates that the core received a PM_PME Message */ +#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_PM_PME (1 << 0) +/* Indicates that the core received a PME_TO_Ack Message */ +#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_PM_TO_ACK (1 << 1) +/* Indicates that the core received an PME_Turn_Off Message */ +#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_PM_TURNOFF (1 << 2) +/* Indicates that the core received an ERR_CORR Message */ +#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_CORRECTABLE_ERR (1 << 3) +/* Indicates that the core received an ERR_NONFATAL Message */ +#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_NONFATAL_ERR (1 << 4) +/* Indicates that the core received an ERR_FATAL Message */ +#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_FATAL_ERR (1 << 5) +/* Indicates that the core received a Vendor Defined Message */ +#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_VENDOR_0 (1 << 6) +/* Indicates that the core received a Vendor Defined Message */ +#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_VENDOR_1 (1 << 7) +/* Indicates that the core received an Unlock Message */ +#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_UNLOCK (1 << 8) +/* Notification when the Link Autonomous Bandwidth Status regist ... */ +#define PCIE_W_INT_GRP_B_CAUSE_B_LINK_AUTO_BW_INT (1 << 12) +/* Notification that the Link Equalization Request bit in the Li ... */ +#define PCIE_W_INT_GRP_B_CAUSE_B_LINK_EQ_REQ_INT (1 << 13) +/* OB Vendor message request is granted by the PCIe core Write ... */ +#define PCIE_W_INT_GRP_B_CAUSE_B_VENDOR_MSG_GRANT (1 << 14) +/* CPL timeout from the PCIe core inidication */ +#define PCIE_W_INT_GRP_B_CAUSE_B_CMP_TIME_OUT (1 << 15) +/* Slave Response Composer Lookup ErrorIndicates that an overflo ... 
*/ +#define PCIE_W_INT_GRP_B_CAUSE_B_RADMX_CMPOSER_LOOKUP_ERR (1 << 16) +/* Parity Error */ +#define PCIE_W_INT_GRP_B_CAUSE_B_PARITY_ERROR_CORE (1 << 17) + +/**** control_B register ****/ +/* When Clear_on_Read =1, all bits of the Cause register are cle ... */ +#define PCIE_W_INT_GRP_B_CONTROL_B_CLEAR_ON_READ (1 << 0) +/* (Must be set only when MSIX is enabled */ +#define PCIE_W_INT_GRP_B_CONTROL_B_AUTO_MASK (1 << 1) +/* Auto_Clear (RW)When Auto-Clear =1, the bits in the Interrupt ... */ +#define PCIE_W_INT_GRP_B_CONTROL_B_AUTO_CLEAR (1 << 2) +/* When Set_on_Posedge =1, the bits in the interrupt Cause regis ... */ +#define PCIE_W_INT_GRP_B_CONTROL_B_SET_ON_POSEDGE (1 << 3) +/* When Moderation_Reset =1, all Moderation timers associated wi ... */ +#define PCIE_W_INT_GRP_B_CONTROL_B_MOD_RST (1 << 4) +/* When mask_msi_x =1, no MSI-X from this group is sent */ +#define PCIE_W_INT_GRP_B_CONTROL_B_MASK_MSI_X (1 << 5) +/* MSI-X AWID value. Same ID for all cause bits. */ +#define PCIE_W_INT_GRP_B_CONTROL_B_AWID_MASK 0x00000F00 +#define PCIE_W_INT_GRP_B_CONTROL_B_AWID_SHIFT 8 +/* This value determines the interval between interrupts */ +#define PCIE_W_INT_GRP_B_CONTROL_B_MOD_INTV_MASK 0x00FF0000 +#define PCIE_W_INT_GRP_B_CONTROL_B_MOD_INTV_SHIFT 16 +/* This value determines the Moderation_Timer_Clock speed */ +#define PCIE_W_INT_GRP_B_CONTROL_B_MOD_RES_MASK 0x0F000000 +#define PCIE_W_INT_GRP_B_CONTROL_B_MOD_RES_SHIFT 24 + +#ifdef __cplusplus +} +#endif + +#endif /* __AL_HAL_pcie_w_REG_H */ + +/** @} end of ... group */ + + diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_serdes.c b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_serdes.c new file mode 100644 index 00000000000000..8f7b44ce1a28a3 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_serdes.c @@ -0,0 +1,2702 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +#include +#include +#include + +#define SRDS_CORE_REG_ADDR(page, type, offset)\ + (((page) << 13) | ((type) << 12) | (offset)) + +/* Link Training configuration */ +#define AL_SERDES_TX_DEEMPH_SUM_MAX 0x1b + +/* c configurations */ +#define AL_SERDES_TX_DEEMPH_C_ZERO_MAX_VAL 0x1b +#define AL_SERDES_TX_DEEMPH_C_ZERO_MIN_VAL 0 +#define AL_SERDES_TX_DEEMPH_C_ZERO_PRESET AL_SERDES_TX_DEEMPH_C_ZERO_MAX_VAL + +/* c(+1) configurations */ +#define AL_SERDES_TX_DEEMPH_C_PLUS_MAX_VAL 0x9 +#define AL_SERDES_TX_DEEMPH_C_PLUS_MIN_VAL 0 +#define AL_SERDES_TX_DEEMPH_C_PLUS_PRESET AL_SERDES_TX_DEEMPH_C_PLUS_MIN_VAL + +/* c(-1) configurations */ +#define AL_SERDES_TX_DEEMPH_C_MINUS_MAX_VAL 0x6 +#define AL_SERDES_TX_DEEMPH_C_MINUS_MIN_VAL 0 +#define AL_SERDES_TX_DEEMPH_C_MINUS_PRESET AL_SERDES_TX_DEEMPH_C_MINUS_MIN_VAL + +/* Rx equal total delay = MDELAY * TRIES */ +#define AL_SERDES_RX_EQUAL_MDELAY 10 +#define AL_SERDES_RX_EQUAL_TRIES 50 + +/* Rx eye calculation delay = MDELAY * TRIES */ +#define AL_SERDES_RX_EYE_CAL_MDELAY 50 +#define AL_SERDES_RX_EYE_CAL_TRIES 70 + + +/** + * SERDES core reg read + */ +static inline uint8_t al_serdes_grp_reg_read( + struct al_serdes_group_info *grp_info, + enum al_serdes_reg_page page, + enum al_serdes_reg_type type, + uint16_t offset); + +/** + * SERDES core reg write + */ +static inline void al_serdes_grp_reg_write( + struct al_serdes_group_info *grp_info, + enum al_serdes_reg_page page, + enum al_serdes_reg_type type, + uint16_t offset, + uint8_t data); + +/** + * SERDES core masked reg write + */ +static inline void al_serdes_grp_reg_masked_write( + struct al_serdes_group_info *grp_info, + enum al_serdes_reg_page page, + enum al_serdes_reg_type type, + uint16_t offset, + uint8_t mask, + uint8_t data); + +/******************************************************************************/ +/******************************************************************************/ +int al_serdes_handle_init( + void __iomem *serdes_regs_base, + struct al_serdes_obj *obj) +{ + int i; + + al_dbg( + "%s(%p, %p)\n", + __func__, + serdes_regs_base, + obj); + + al_assert(serdes_regs_base); + + for (i = 0; i < AL_SRDS_NUM_GROUPS; i++) { + obj->grp_info[i].pobj = obj; + + obj->grp_info[i].regs_base = + &((struct al_serdes_regs *)serdes_regs_base)[i]; + } + + return 0; +} + +/******************************************************************************/ +/******************************************************************************/ +int al_serdes_reg_read( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_reg_page page, + enum al_serdes_reg_type type, + uint16_t offset, + uint8_t *data) +{ + int status = 0; + + al_dbg( + "%s(%p, %d, %d, %d, %u)\n", + __func__, + obj, + grp, + page, + type, + offset); + + al_assert(obj); + al_assert(data); + al_assert(((int)grp) >= AL_SRDS_GRP_A); + al_assert(((int)grp) <= AL_SRDS_GRP_D); + al_assert(((int)page) >= 
AL_SRDS_REG_PAGE_0_LANE_0); + al_assert(((int)page) <= AL_SRDS_REG_PAGE_4_COMMON); + al_assert(((int)type) >= AL_SRDS_REG_TYPE_PMA); + al_assert(((int)type) <= AL_SRDS_REG_TYPE_PCS); + + *data = al_serdes_grp_reg_read( + &obj->grp_info[grp], + page, + type, + offset); + + al_dbg( + "%s: return(%u)\n", + __func__, + *data); + + return status; +} + +/******************************************************************************/ +/******************************************************************************/ +int al_serdes_reg_write( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_reg_page page, + enum al_serdes_reg_type type, + uint16_t offset, + uint8_t data) +{ + int status = 0; + + al_dbg( + "%s(%p, %d, %d, %d, %u, %u)\n", + __func__, + obj, + grp, + page, + type, + offset, + data); + + al_assert(obj); + al_assert(((int)grp) >= AL_SRDS_GRP_A); + al_assert(((int)grp) <= AL_SRDS_GRP_D); + al_assert(((int)page) >= AL_SRDS_REG_PAGE_0_LANE_0); + al_assert(((int)page) <= AL_SRDS_REG_PAGE_0123_LANES_0123); + al_assert(((int)type) >= AL_SRDS_REG_TYPE_PMA); + al_assert(((int)type) <= AL_SRDS_REG_TYPE_PCS); + + al_serdes_grp_reg_write( + &obj->grp_info[grp], + page, + type, + offset, + data); + + return status; +} + +/******************************************************************************/ +/******************************************************************************/ +#if (SERDES_IREG_FLD_PCSRX_DATAWIDTH_REG_NUM != SERDES_IREG_FLD_PCSTX_DATAWIDTH_REG_NUM) +#error "Wrong assumption!" +#endif +#if (SERDES_IREG_FLD_PCSRX_DIVRATE_REG_NUM != SERDES_IREG_FLD_PCSTX_DIVRATE_REG_NUM) +#error "Wrong assumption!" +#endif +#if (SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM != SERDES_IREG_FLD_CMNPCS_LOCWREN_REG_NUM) +#error "Wrong assumption!" +#endif +#if (SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM != SERDES_IREG_FLD_CMNPCSBIST_LOCWREN_REG_NUM) +#error "Wrong assumption!" +#endif +#if (SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM != SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN_REG_NUM) +#error "Wrong assumption!" +#endif +#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_LB_LOCWREN_REG_NUM) +#error "Wrong assumption!" +#endif +#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_PCSRX_LOCWREN_REG_NUM) +#error "Wrong assumption!" +#endif +#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_PCSRXBIST_LOCWREN_REG_NUM) +#error "Wrong assumption!" +#endif +#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM) +#error "Wrong assumption!" +#endif +#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_PCSTX_LOCWREN_REG_NUM) +#error "Wrong assumption!" 
+#endif +void al_serdes_bist_overrides_enable( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_rate rate) +{ + struct al_serdes_group_info *grp_info = &obj->grp_info[grp]; + int i; + + uint8_t rx_rate_val; + uint8_t tx_rate_val; + + switch (rate) { + case AL_SRDS_RATE_1_8: + rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_8; + tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_8; + break; + case AL_SRDS_RATE_1_4: + rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_4; + tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_4; + break; + case AL_SRDS_RATE_1_2: + rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_2; + tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_2; + break; + case AL_SRDS_RATE_FULL: + rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_1; + tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_1; + break; + default: + al_err("%s: invalid rate (%d)\n", __func__, rate); + al_assert(0); + rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_1; + tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_1; + } + + for (i = 0; i < AL_SRDS_NUM_LANES; i++) { + al_serdes_grp_reg_masked_write( + grp_info, + (enum al_serdes_reg_page)i, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_PCSRX_DATAWIDTH_REG_NUM, + SERDES_IREG_FLD_PCSRX_DATAWIDTH_MASK | + SERDES_IREG_FLD_PCSTX_DATAWIDTH_MASK, + SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_20 | + SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_20); + + al_serdes_grp_reg_masked_write( + grp_info, + (enum al_serdes_reg_page)i, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_PCSRX_DIVRATE_REG_NUM, + SERDES_IREG_FLD_PCSRX_DIVRATE_MASK | + SERDES_IREG_FLD_PCSTX_DIVRATE_MASK, + rx_rate_val | tx_rate_val); + } + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM, + SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN | + SERDES_IREG_FLD_CMNPCS_LOCWREN | + SERDES_IREG_FLD_CMNPCSBIST_LOCWREN | + SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN, + 0); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM, + SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN | + SERDES_IREG_FLD_CMNPCS_LOCWREN | + SERDES_IREG_FLD_CMNPCSBIST_LOCWREN | + SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN, + 0); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_PCS_LOCWREN_REG_NUM, + SERDES_IREG_FLD_PCS_LOCWREN, + 0); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_CMNPCS_TXENABLE_REG_NUM, + SERDES_IREG_FLD_CMNPCS_TXENABLE, + SERDES_IREG_FLD_CMNPCS_TXENABLE); + + for (i = 0; i < AL_SRDS_NUM_LANES; i++) { + al_serdes_grp_reg_masked_write( + grp_info, + (enum al_serdes_reg_page)i, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM, + SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN | + SERDES_IREG_FLD_LB_LOCWREN | + SERDES_IREG_FLD_PCSRX_LOCWREN | + SERDES_IREG_FLD_PCSRXBIST_LOCWREN | + SERDES_IREG_FLD_PCSRXEQ_LOCWREN | + SERDES_IREG_FLD_PCSTX_LOCWREN, + 0); + + al_serdes_grp_reg_masked_write( + grp_info, + (enum al_serdes_reg_page)i, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_PCSTXBIST_LOCWREN_REG_NUM, + SERDES_IREG_FLD_PCSTXBIST_LOCWREN, + 0); + + al_serdes_grp_reg_masked_write( + grp_info, + (enum al_serdes_reg_page)i, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM, + SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN, + 0); + + al_serdes_grp_reg_masked_write( + grp_info, + (enum al_serdes_reg_page)i, + 
AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXLOCK2REF_OVREN_REG_NUM, + SERDES_IREG_FLD_RXLOCK2REF_OVREN, + SERDES_IREG_FLD_RXLOCK2REF_OVREN); + } +} + +/******************************************************************************/ +/******************************************************************************/ +void al_serdes_group_pm_set( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_pm pm) +{ + struct al_serdes_group_info *grp_info = &obj->grp_info[grp]; + + uint8_t pm_val; + + switch (pm) { + case AL_SRDS_PM_PD: + pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_PD; + break; + case AL_SRDS_PM_P2: + pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P2; + break; + case AL_SRDS_PM_P1: + pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P1; + break; + case AL_SRDS_PM_P0S: + pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0S; + break; + case AL_SRDS_PM_P0: + pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0; + break; + default: + al_err("%s: invalid power mode (%d)\n", __func__, pm); + al_assert(0); + pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0; + } + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_REG_NUM, + SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_MASK, + pm_val); +} + +/******************************************************************************/ +/******************************************************************************/ +void al_serdes_lane_pm_set( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + enum al_serdes_pm rx_pm, + enum al_serdes_pm tx_pm) +{ + struct al_serdes_group_info *grp_info = &obj->grp_info[grp]; + + uint8_t rx_pm_val; + uint8_t tx_pm_val; + + switch (rx_pm) { + case AL_SRDS_PM_PD: + rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_PD; + break; + case AL_SRDS_PM_P2: + rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P2; + break; + case AL_SRDS_PM_P1: + rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P1; + break; + case AL_SRDS_PM_P0S: + rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0S; + break; + case AL_SRDS_PM_P0: + rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0; + break; + default: + al_err("%s: invalid rx power mode (%d)\n", __func__, rx_pm); + al_assert(0); + rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0; + } + + switch (tx_pm) { + case AL_SRDS_PM_PD: + tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_PD; + break; + case AL_SRDS_PM_P2: + tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P2; + break; + case AL_SRDS_PM_P1: + tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P1; + break; + case AL_SRDS_PM_P0S: + tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0S; + break; + case AL_SRDS_PM_P0: + tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0; + break; + default: + al_err("%s: invalid tx power mode (%d)\n", __func__, tx_pm); + al_assert(0); + tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0; + } + + al_serdes_grp_reg_masked_write( + grp_info, + (enum al_serdes_reg_page)lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_LANEPCSPSTATE_RX_REG_NUM, + SERDES_IREG_FLD_LANEPCSPSTATE_RX_MASK, + rx_pm_val); + + al_serdes_grp_reg_masked_write( + grp_info, + (enum al_serdes_reg_page)lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_LANEPCSPSTATE_TX_REG_NUM, + SERDES_IREG_FLD_LANEPCSPSTATE_TX_MASK, + tx_pm_val); +} + +/******************************************************************************/ +/******************************************************************************/ +void 
al_serdes_pma_hard_reset_group( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + al_bool enable) +{ + struct al_serdes_group_info *grp_info = &obj->grp_info[grp]; + + /* Enable Hard Reset Override */ + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_REG_NUM, + SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_MASK, + SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_VAL_REGS); + + /* Assert/Deassert Hard Reset Override */ + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_REG_NUM, + SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_MASK, + enable ? + SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_ASSERT : + SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_DEASSERT); +} + +/******************************************************************************/ +/******************************************************************************/ +void al_serdes_pma_hard_reset_lane( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + al_bool enable) +{ + struct al_serdes_group_info *grp_info = &obj->grp_info[grp]; + + /* Enable Hard Reset Override */ + al_serdes_grp_reg_masked_write( + grp_info, + (enum al_serdes_reg_page)lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_REG_NUM, + SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_MASK, + SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_VAL_REGS); + + /* Assert/Deassert Hard Reset Override */ + al_serdes_grp_reg_masked_write( + grp_info, + (enum al_serdes_reg_page)lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_REG_NUM, + SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_MASK, + enable ? 
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_ASSERT : + SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_DEASSERT); +} + +/******************************************************************************/ +/******************************************************************************/ +#if (SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN_REG_NUM !=\ + SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN_REG_NUM) ||\ + (SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN_REG_NUM !=\ + SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN_REG_NUM) ||\ + (SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN_REG_NUM !=\ + SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN_REG_NUM) ||\ + (SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN_REG_NUM !=\ + SERDES_IREG_FLD_LB_CDRCLK2TXEN_REG_NUM) +#error Wrong assumption +#endif + +void al_serdes_loopback_control( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + enum al_serdes_lb_mode mode) +{ + struct al_serdes_group_info *grp_info = &obj->grp_info[grp]; + uint8_t val = 0; + + switch (mode) { + case AL_SRDS_LB_MODE_OFF: + break; + case AL_SRDS_LB_MODE_PMA_IO_UN_TIMED_RX_TO_TX: + val = SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN; + break; + case AL_SRDS_LB_MODE_PMA_INTERNALLY_BUFFERED_SERIAL_TX_TO_RX: + val = SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN; + break; + case AL_SRDS_LB_MODE_PMA_SERIAL_TX_IO_TO_RX_IO: + val = SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN; + break; + case AL_SRDS_LB_MODE_PMA_PARALLEL_RX_TO_TX: + val = SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN | + SERDES_IREG_FLD_LB_CDRCLK2TXEN; + break; + default: + al_err("%s: invalid mode (%d)\n", __func__, mode); + al_assert(0); + } + + al_serdes_grp_reg_masked_write( + grp_info, + (enum al_serdes_reg_page)lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN_REG_NUM, + SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN | + SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN | + SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN | + SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN | + SERDES_IREG_FLD_LB_CDRCLK2TXEN, + val); +} + +/******************************************************************************/ +/******************************************************************************/ +void al_serdes_bist_pattern_select( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_bist_pattern pattern, + uint8_t *user_data) +{ + struct al_serdes_group_info *grp_info = &obj->grp_info[grp]; + uint8_t val = 0; + + switch (pattern) { + case AL_SRDS_BIST_PATTERN_USER: + al_assert(user_data); + al_err("%s: user pattern currently not supported!\n", __func__); + al_assert(0); + break; + case AL_SRDS_BIST_PATTERN_PRBS7: + val = SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS7; + break; + case AL_SRDS_BIST_PATTERN_PRBS23: + val = SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS23; + break; + case AL_SRDS_BIST_PATTERN_PRBS31: + val = SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS31; + break; + case AL_SRDS_BIST_PATTERN_CLK1010: + val = SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_CLK1010; + break; + default: + al_err("%s: invalid pattern (%d)\n", __func__, pattern); + al_assert(0); + } + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_CMNPCSBIST_MODESEL_REG_NUM, + SERDES_IREG_FLD_CMNPCSBIST_MODESEL_MASK, + val); +} + +/******************************************************************************/ +/******************************************************************************/ +void al_serdes_bist_tx_enable( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + al_bool enable) +{ + struct al_serdes_group_info *grp_info = &obj->grp_info[grp]; + + 
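/*
+	 * Typical BIST usage, as a sketch built only from the functions in
+	 * this file: select a pattern on the common page with
+	 * al_serdes_bist_pattern_select(), enable the per-lane generator and
+	 * checker with al_serdes_bist_tx_enable()/al_serdes_bist_rx_enable(),
+	 * then poll lock status and error counters with
+	 * al_serdes_bist_rx_status().
+	 */
+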
al_serdes_grp_reg_masked_write( + grp_info, + (enum al_serdes_reg_page)lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_PCSTXBIST_EN_REG_NUM, + SERDES_IREG_FLD_PCSTXBIST_EN, + enable ? SERDES_IREG_FLD_PCSTXBIST_EN : 0); +} + +/******************************************************************************/ +/******************************************************************************/ +void al_serdes_bist_tx_err_inject( + struct al_serdes_obj *obj, + enum al_serdes_group grp) +{ + struct al_serdes_group_info *grp_info = &obj->grp_info[grp]; + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_TXBIST_BITERROR_EN_REG_NUM, + SERDES_IREG_FLD_TXBIST_BITERROR_EN, + SERDES_IREG_FLD_TXBIST_BITERROR_EN); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_TXBIST_BITERROR_EN_REG_NUM, + SERDES_IREG_FLD_TXBIST_BITERROR_EN, + 0); +} + +/******************************************************************************/ +/******************************************************************************/ +void al_serdes_bist_rx_enable( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + al_bool enable) +{ + struct al_serdes_group_info *grp_info = &obj->grp_info[grp]; + + al_serdes_grp_reg_masked_write( + grp_info, + (enum al_serdes_reg_page)lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_PCSRXBIST_EN_REG_NUM, + SERDES_IREG_FLD_PCSRXBIST_EN, + enable ? SERDES_IREG_FLD_PCSRXBIST_EN : 0); +} + +/******************************************************************************/ +/******************************************************************************/ +#if (SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW_REG_NUM !=\ + SERDES_IREG_FLD_RXBIST_RXLOCKED_REG_NUM) +#error Wrong assumption +#endif + +void al_serdes_bist_rx_status( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + al_bool *is_locked, + al_bool *err_cnt_overflow, + uint16_t *err_cnt) +{ + struct al_serdes_group_info *grp_info = &obj->grp_info[grp]; + uint8_t status_reg_val; + uint16_t err_cnt_msb_reg_val; + uint16_t err_cnt_lsb_reg_val; + + status_reg_val = al_serdes_grp_reg_read( + grp_info, + (enum al_serdes_reg_page)lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXBIST_RXLOCKED_REG_NUM); + + err_cnt_msb_reg_val = al_serdes_grp_reg_read( + grp_info, + (enum al_serdes_reg_page)lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXBIST_ERRCOUNT_MSB_REG_NUM); + + err_cnt_lsb_reg_val = al_serdes_grp_reg_read( + grp_info, + (enum al_serdes_reg_page)lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXBIST_ERRCOUNT_LSB_REG_NUM); + + *is_locked = + (status_reg_val & SERDES_IREG_FLD_RXBIST_RXLOCKED) ? + AL_TRUE : AL_FALSE; + + *err_cnt_overflow = + (status_reg_val & SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW) ? 
+ AL_TRUE : AL_FALSE; + + *err_cnt = (err_cnt_msb_reg_val << 8) + err_cnt_lsb_reg_val; +} + +/******************************************************************************/ +/******************************************************************************/ +static inline uint8_t al_serdes_grp_reg_read( + struct al_serdes_group_info *grp_info, + enum al_serdes_reg_page page, + enum al_serdes_reg_type type, + uint16_t offset) +{ + al_reg_write32( + &grp_info->regs_base->gen.reg_addr, + SRDS_CORE_REG_ADDR(page, type, offset)); + + return al_reg_read32(&grp_info->regs_base->gen.reg_data); +} + +/******************************************************************************/ +/******************************************************************************/ +static inline void al_serdes_grp_reg_write( + struct al_serdes_group_info *grp_info, + enum al_serdes_reg_page page, + enum al_serdes_reg_type type, + uint16_t offset, + uint8_t data) +{ + al_reg_write32( + &grp_info->regs_base->gen.reg_addr, + SRDS_CORE_REG_ADDR(page, type, offset)); + + al_reg_write32(&grp_info->regs_base->gen.reg_data, data); +} + +/******************************************************************************/ +/******************************************************************************/ +static inline void al_serdes_grp_reg_masked_write( + struct al_serdes_group_info *grp_info, + enum al_serdes_reg_page page, + enum al_serdes_reg_type type, + uint16_t offset, + uint8_t mask, + uint8_t data) +{ + uint8_t val; + enum al_serdes_reg_page start_page = page; + enum al_serdes_reg_page end_page = page; + enum al_serdes_reg_page iter_page; + + if (page == AL_SRDS_REG_PAGE_0123_LANES_0123) { + start_page = AL_SRDS_REG_PAGE_0_LANE_0; + end_page = AL_SRDS_REG_PAGE_3_LANE_3; + } + + for(iter_page = start_page; iter_page <= end_page; ++iter_page) { + val = al_serdes_grp_reg_read(grp_info, iter_page, type, offset); + val &= ~mask; + val |= data; + al_serdes_grp_reg_write(grp_info, iter_page, type, offset, val); + } +} + +/******************************************************************************/ +/******************************************************************************/ +int al_serdes_eye_measure_run( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + uint32_t timeout, + unsigned int *value) +{ + uint32_t reg = 0; + uint32_t i; + struct serdes_lane *lane_regs; + + lane_regs = &obj->grp_info[grp].regs_base->lane[lane]; + + al_reg_write32(&lane_regs->ictl_multi_rxeq, + SERDES_LANE_ICTL_MULTI_RXEQ_START_L_A); + + for (i = 0 ; i < timeout ; i++) { + reg = al_reg_read32(&lane_regs->octl_multi); + + if (reg & SERDES_LANE_OCTL_MULTI_RXEQ_DONE_L_A) + break; + + al_msleep(10); + } + + if (i == timeout) { + al_err("%s: measure eye failed on timeout\n", __func__); + return -ETIMEDOUT; + } + + *value = al_reg_read32(&lane_regs->odat_multi_rxeq); + + al_reg_write32(&lane_regs->ictl_multi_rxeq, 0); + + return 0; +} + +/******************************************************************************/ +/******************************************************************************/ +int al_serdes_eye_diag_sample( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + unsigned int x, + int y, + unsigned int timeout, + unsigned int *value) +{ + enum al_serdes_reg_page page = (enum al_serdes_reg_page)lane; + struct al_serdes_group_info *grp_info; + uint32_t i; + uint8_t sample_count_orig_msb; + uint8_t sample_count_orig_lsb; + + al_assert(obj); + al_assert(((int)grp) >= 
AL_SRDS_GRP_A); + al_assert(((int)grp) <= AL_SRDS_GRP_D); + al_assert(((int)page) >= AL_SRDS_REG_PAGE_0_LANE_0); + al_assert(((int)page) <= AL_SRDS_REG_PAGE_0123_LANES_0123); + + grp_info = &obj->grp_info[grp]; + + /* Obtain sample count by reading RXCALROAMEYEMEAS_COUNT */ + sample_count_orig_msb = al_serdes_grp_reg_read(grp_info, + AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM); + sample_count_orig_lsb = al_serdes_grp_reg_read(grp_info, + AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM); + + /* Set sample count to ~100000 samples */ + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM, 0x13); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM, 0x88); + + /* BER Contour Overwrite */ + al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM, + SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN, + 0); + al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM, + SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN, + 0); + al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM, + SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN, + 0); + + /* RXROAM_XORBITSEL = 0x1 or 0x0 */ + al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM, + SERDES_IREG_FLD_RXROAM_XORBITSEL, + SERDES_IREG_FLD_RXROAM_XORBITSEL_2ND); + + /* Set X */ + al_serdes_grp_reg_write(grp_info, page, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMXADJUST_REG_NUM, x); + + /* Set Y */ + al_serdes_grp_reg_write(grp_info, page, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMYADJUST_REG_NUM, + y < 32 ? 
31 - y : y + 1); + + /* Start Measurement by setting RXCALROAMEYEMEASIN_CYCLEEN = 0x1 */ + al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_REG_NUM, + SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START, + SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START); + + /* Check RXCALROAMEYEMEASDONE Signal (Polling Until 0x1) */ + for (i = 0 ; i < timeout ; i++) { + if (al_serdes_grp_reg_read(grp_info, page, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMEYEMEASDONE_REG_NUM) & + SERDES_IREG_FLD_RXCALROAMEYEMEASDONE) + break; + al_udelay(1); + } + if (i == timeout) { + al_err("%s: eye diagram sampling timed out!\n", __func__); + return -ETIMEDOUT; + } + + /* Stop Measurement by setting RXCALROAMEYEMEASIN_CYCLEEN = 0x0 */ + al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_REG_NUM, + SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START, + 0); + + /* Obtain Error Counts by reading RXCALROAMEYEMEAS_ACC */ + *value = ((unsigned int)al_serdes_grp_reg_read(grp_info, page, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_MSB_REG_NUM)) << 8 | + al_serdes_grp_reg_read(grp_info, page, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_LSB_REG_NUM); + + /* BER Contour Overwrite */ + al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM, + SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN, + SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN); + al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM, + SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN, + SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN); + al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM, + SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN, + SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN); + + /* Restore sample count */ + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM, + sample_count_orig_msb); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM, + sample_count_orig_lsb); + + return 0; +} + +/******************************************************************************/ +/******************************************************************************/ +static void al_serdes_tx_deemph_set( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + uint32_t c_zero, + uint32_t c_plus_1, + uint32_t c_minus_1) +{ + al_serdes_grp_reg_masked_write( + &obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_TX_DRV_1_REG_NUM, + SERDES_IREG_TX_DRV_1_LEVN_MASK, + ((c_zero + c_plus_1 + c_minus_1) + << SERDES_IREG_TX_DRV_1_LEVN_SHIFT)); + + al_serdes_grp_reg_masked_write( + &obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_TX_DRV_2_REG_NUM, + SERDES_IREG_TX_DRV_2_LEVNM1_MASK, + (c_plus_1 << SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT)); + + al_serdes_grp_reg_masked_write( + &obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_TX_DRV_3_REG_NUM, + SERDES_IREG_TX_DRV_3_LEVNP1_MASK, + (c_minus_1 << SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT)); +} + +static void al_serdes_tx_deemph_get( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + uint32_t *c_zero, + uint32_t *c_plus_1, + uint32_t *c_minus_1) +{ + 
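/*
+	 * Note: the setter above programs TX_DRV_1.LEVN with the sum
+	 * c_zero + c_plus_1 + c_minus_1, and the individual taps into
+	 * LEVNM1/LEVNP1, so this getter recovers c_zero by subtraction.
+	 * Worked example with hypothetical values: c_zero = 20, c_plus_1 = 4,
+	 * c_minus_1 = 2 yields LEVN = 26, and c_zero = 26 - 4 - 2 = 20 here.
+	 */
+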
uint32_t reg = 0; + + reg = al_serdes_grp_reg_read( + &obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_TX_DRV_2_REG_NUM); + + *c_plus_1 = ((reg & SERDES_IREG_TX_DRV_2_LEVNM1_MASK) >> + SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT); + + reg = al_serdes_grp_reg_read( + &obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_TX_DRV_3_REG_NUM); + + *c_minus_1 = ((reg & SERDES_IREG_TX_DRV_3_LEVNP1_MASK) >> + SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT); + + reg = al_serdes_grp_reg_read( + &obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_TX_DRV_1_REG_NUM); + + *c_zero = (((reg & SERDES_IREG_TX_DRV_1_LEVN_MASK) >> + SERDES_IREG_TX_DRV_1_LEVN_SHIFT) - *c_plus_1 - *c_minus_1); +} + +al_bool al_serdes_tx_deemph_inc( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + enum al_serdes_tx_deemph_param param) +{ + al_bool ret = AL_TRUE; + uint32_t c0; + uint32_t c1; + uint32_t c_1; + + al_serdes_tx_deemph_get(obj, grp, lane, &c0, &c1, &c_1); + + al_dbg("%s: current txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n", + __func__, c0, c1, c_1); + + switch (param) { + case AL_SERDES_TX_DEEMP_C_ZERO: + + if (c0 == AL_SERDES_TX_DEEMPH_C_ZERO_MAX_VAL) + return AL_FALSE; + + c0++; + + break; + case AL_SERDES_TX_DEEMP_C_PLUS: + + if (c1 == AL_SERDES_TX_DEEMPH_C_PLUS_MAX_VAL) + return AL_FALSE; + + c1++; + + break; + case AL_SERDES_TX_DEEMP_C_MINUS: + + if (c_1 == AL_SERDES_TX_DEEMPH_C_MINUS_MAX_VAL) + return AL_FALSE; + + c_1++; + + break; + } + + if ((c0 + c1 + c_1) > AL_SERDES_TX_DEEMPH_SUM_MAX) { + al_dbg("%s: sum of all tx de-emphasis over the max limit\n", + __func__); + + return AL_FALSE; + } + + al_dbg("%s: new txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n", + __func__, c0, c1, c_1); + + al_serdes_tx_deemph_set(obj, grp, lane, c0, c1, c_1); + + return ret; +} + +al_bool al_serdes_tx_deemph_dec( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + enum al_serdes_tx_deemph_param param) +{ + al_bool ret = AL_TRUE; + uint32_t c0; + uint32_t c1; + uint32_t c_1; + + al_serdes_tx_deemph_get(obj, grp, lane, &c0, &c1, &c_1); + + al_dbg("%s: current txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n", + __func__, c0, c1, c_1); + + switch (param) { + case AL_SERDES_TX_DEEMP_C_ZERO: + + if (c0 == AL_SERDES_TX_DEEMPH_C_ZERO_MIN_VAL) + return AL_FALSE; + + c0--; + + break; + case AL_SERDES_TX_DEEMP_C_PLUS: + + if (c1 == AL_SERDES_TX_DEEMPH_C_PLUS_MIN_VAL) + return AL_FALSE; + + c1--; + + break; + case AL_SERDES_TX_DEEMP_C_MINUS: + + if (c_1 == AL_SERDES_TX_DEEMPH_C_MINUS_MIN_VAL) + return AL_FALSE; + + c_1--; + + break; + } + + al_dbg("%s: new txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n", + __func__, c0, c1, c_1); + + al_serdes_tx_deemph_set(obj, grp, lane, c0, c1, c_1); + + return ret; +} + +void al_serdes_tx_deemph_preset( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane) +{ + uint32_t c0; + uint32_t c1; + uint32_t c_1; + + c0 = AL_SERDES_TX_DEEMPH_C_ZERO_PRESET; + + c1 = AL_SERDES_TX_DEEMPH_C_PLUS_PRESET; + + c_1 = AL_SERDES_TX_DEEMPH_C_MINUS_PRESET; + + al_dbg("preset: new txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n", + c0, c1, c_1); + + al_serdes_tx_deemph_set(obj, grp, lane, c0, c1, c_1); +} + +al_bool al_serdes_signal_is_detected( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane) +{ + uint32_t reg = 0; + + reg = al_serdes_grp_reg_read( + &obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXRANDET_REG_NUM); + + return ((reg & 
SERDES_IREG_FLD_RXRANDET_STAT) ? AL_TRUE : AL_FALSE);
+}
+
+void al_serdes_tx_advanced_params_set(struct al_serdes_obj *obj,
+				enum al_serdes_group grp,
+				enum al_serdes_lane lane,
+				struct al_serdes_adv_tx_params *params)
+{
+	uint8_t reg = 0;
+
+	if (!params->override) {
+		al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+				lane,
+				AL_SRDS_REG_TYPE_PMA,
+				SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM,
+				SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN,
+				SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN);
+
+		return;
+	}
+
+	al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM,
+			SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN,
+			0);
+
+	AL_REG_FIELD_SET(reg,
+			SERDES_IREG_TX_DRV_1_HLEV_MASK,
+			SERDES_IREG_TX_DRV_1_HLEV_SHIFT,
+			params->amp);
+
+	AL_REG_FIELD_SET(reg,
+			SERDES_IREG_TX_DRV_1_LEVN_MASK,
+			SERDES_IREG_TX_DRV_1_LEVN_SHIFT,
+			params->total_driver_units);
+
+	al_serdes_grp_reg_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_TX_DRV_1_REG_NUM,
+			reg);
+
+	reg = 0;
+	AL_REG_FIELD_SET(reg,
+			SERDES_IREG_TX_DRV_2_LEVNM1_MASK,
+			SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT,
+			params->c_plus_1);
+
+	AL_REG_FIELD_SET(reg,
+			SERDES_IREG_TX_DRV_2_LEVNM2_MASK,
+			SERDES_IREG_TX_DRV_2_LEVNM2_SHIFT,
+			params->c_plus_2);
+
+	al_serdes_grp_reg_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_TX_DRV_2_REG_NUM,
+			reg);
+
+	reg = 0;
+	AL_REG_FIELD_SET(reg,
+			SERDES_IREG_TX_DRV_3_LEVNP1_MASK,
+			SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT,
+			params->c_minus_1);
+
+	AL_REG_FIELD_SET(reg,
+			SERDES_IREG_TX_DRV_3_SLEW_MASK,
+			SERDES_IREG_TX_DRV_3_SLEW_SHIFT,
+			params->slew_rate);
+
+	al_serdes_grp_reg_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_TX_DRV_3_REG_NUM,
+			reg);
+}
+
+void al_serdes_tx_advanced_params_get(struct al_serdes_obj *obj,
+				enum al_serdes_group grp,
+				enum al_serdes_lane lane,
+				struct al_serdes_adv_tx_params *tx_params)
+{
+	uint8_t reg_val = 0;
+
+	al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_TX_DRV_1_REG_NUM,
+			&reg_val);
+	tx_params->amp = (reg_val & SERDES_IREG_TX_DRV_1_HLEV_MASK) >>
+				SERDES_IREG_TX_DRV_1_HLEV_SHIFT;
+	tx_params->total_driver_units = (reg_val &
+				SERDES_IREG_TX_DRV_1_LEVN_MASK) >>
+				SERDES_IREG_TX_DRV_1_LEVN_SHIFT;
+
+	al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_TX_DRV_2_REG_NUM,
+			&reg_val);
+	tx_params->c_plus_1 = (reg_val & SERDES_IREG_TX_DRV_2_LEVNM1_MASK) >>
+				SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT;
+	tx_params->c_plus_2 = (reg_val & SERDES_IREG_TX_DRV_2_LEVNM2_MASK) >>
+				SERDES_IREG_TX_DRV_2_LEVNM2_SHIFT;
+
+	al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_TX_DRV_3_REG_NUM,
+			&reg_val);
+	tx_params->c_minus_1 = (reg_val & SERDES_IREG_TX_DRV_3_LEVNP1_MASK) >>
+				SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT;
+	tx_params->slew_rate = (reg_val & SERDES_IREG_TX_DRV_3_SLEW_MASK) >>
+				SERDES_IREG_TX_DRV_3_SLEW_SHIFT;
+
+	al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM,
+			&reg_val);
+	tx_params->override = ((reg_val & SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN) == 0);
+}
+
+void al_serdes_rx_advanced_params_set(struct al_serdes_obj *obj,
+				enum al_serdes_group grp,
+				enum al_serdes_lane lane,
+				struct al_serdes_adv_rx_params *params)
+{
+	uint8_t reg = 0;
+
+	if (!params->override) {
+		al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+				lane,
+				AL_SRDS_REG_TYPE_PMA,
+				SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN_REG_NUM,
+				SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN,
+
SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN); + + return; + } + + al_serdes_grp_reg_masked_write(&obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN_REG_NUM, + SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN, + 0); + + AL_REG_FIELD_SET(reg, + SERDES_IREG_RX_CALEQ_1_DCGAIN_MASK, + SERDES_IREG_RX_CALEQ_1_DCGAIN_SHIFT, + params->dcgain); + + AL_REG_FIELD_SET(reg, + SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_MASK, + SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_SHIFT, + params->dfe_3db_freq); + + al_serdes_grp_reg_write(&obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_RX_CALEQ_1_REG_NUM, + reg); + + reg = 0; + AL_REG_FIELD_SET(reg, + SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_MASK, + SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_SHIFT, + params->dfe_gain); + + AL_REG_FIELD_SET(reg, + SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_MASK, + SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_SHIFT, + params->dfe_first_tap_ctrl); + + al_serdes_grp_reg_write(&obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_RX_CALEQ_2_REG_NUM, + reg); + + reg = 0; + AL_REG_FIELD_SET(reg, + SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_MASK, + SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_SHIFT, + params->dfe_secound_tap_ctrl); + + AL_REG_FIELD_SET(reg, + SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_MASK, + SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_SHIFT, + params->dfe_third_tap_ctrl); + + al_serdes_grp_reg_write(&obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_RX_CALEQ_3_REG_NUM, + reg); + + reg = 0; + AL_REG_FIELD_SET(reg, + SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_MASK, + SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_SHIFT, + params->dfe_fourth_tap_ctrl); + + AL_REG_FIELD_SET(reg, + SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_MASK, + SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_SHIFT, + params->low_freq_agc_gain); + + al_serdes_grp_reg_write(&obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_RX_CALEQ_4_REG_NUM, + reg); + + reg = 0; + AL_REG_FIELD_SET(reg, + SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_MASK, + SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_SHIFT, + params->precal_code_sel); + + AL_REG_FIELD_SET(reg, + SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_MASK, + SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_SHIFT, + params->high_freq_agc_boost); + + al_serdes_grp_reg_write(&obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_RX_CALEQ_5_REG_NUM, + reg); +} + +static inline void al_serdes_ns_delay(int cnt) +{ + al_udelay((cnt + 999) / 1000); +} + +static inline void al_serdes_common_cfg_eth(struct al_serdes_group_info *grp_info) +{ + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_REG_NUM, + SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_MASK, + (0x1 << SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_REG_NUM, + SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_MASK, + (0 << SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_REG_NUM, + SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_MASK, + (0x2 << SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_REG_NUM, + SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_MASK, + (0 << SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + 
AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXEQ_COARSE_STEP_REG_NUM, + SERDES_IREG_FLD_RXEQ_COARSE_STEP_MASK, + (0x1 << SERDES_IREG_FLD_RXEQ_COARSE_STEP_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_REG_NUM, + SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_MASK, + (0x1 << SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_REG_NUM, + SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_MASK, + (0xf0 << SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_REG_NUM, + SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_MASK, + (0 << SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXEQ_FINE_STEP_REG_NUM, + SERDES_IREG_FLD_RXEQ_FINE_STEP_MASK, + (1 << SERDES_IREG_FLD_RXEQ_FINE_STEP_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_REG_NUM, + SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_MASK, + (0x8 << SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_REG_NUM, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_MASK, + (0 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_REG_NUM, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_MASK, + (0x64 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_MASK, + (0x3 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_MASK, + (0x1 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_MASK, + (3 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_MASK, + (1 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM, + SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_MASK, + (0xc << SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_SHIFT)); + + al_serdes_grp_reg_masked_write( + grp_info, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM, + SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_MASK, + (0xcc << SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_SHIFT)); +} + +struct 
al_serdes_mode_rx_tx_inv_state { + al_bool restore; + uint32_t pipe_rst; + uint32_t ipd_multi[AL_SRDS_NUM_LANES]; + uint8_t inv_value[AL_SRDS_NUM_LANES]; +}; + +static void al_serdes_mode_rx_tx_inv_state_save( + struct al_serdes_group_info *grp_info, + struct al_serdes_mode_rx_tx_inv_state *state) +{ + if (al_reg_read32(&grp_info->regs_base->gen.irst) & SERDES_GEN_IRST_POR_B_A) { + int i; + + state->restore = AL_TRUE; + state->pipe_rst = al_reg_read32(&grp_info->regs_base->gen.irst); + + for (i = 0; i < AL_SRDS_NUM_LANES; i++) { + state->inv_value[i] = al_serdes_grp_reg_read( + grp_info, + i, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_POLARITY_RX_REG_NUM); + state->ipd_multi[i] = + al_reg_read32(&grp_info->regs_base->lane[i].ipd_multi); + } + } else { + state->restore = AL_FALSE; + } +} + +static void al_serdes_mode_rx_tx_inv_state_restore( + struct al_serdes_group_info *grp_info, + struct al_serdes_mode_rx_tx_inv_state *state) +{ + if (state->restore) { + int i; + + for (i = 0; i < AL_SRDS_NUM_LANES; i++) { + al_serdes_grp_reg_write( + grp_info, + i, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_POLARITY_RX_REG_NUM, + state->inv_value[i]); + al_reg_write32( + &grp_info->regs_base->lane[i].ipd_multi, state->ipd_multi[i]); + al_reg_write32_masked( + &grp_info->regs_base->gen.irst, + (SERDES_GEN_IRST_PIPE_RST_L0_B_A_SEL >> i) | + (SERDES_GEN_IRST_PIPE_RST_L0_B_A >> i), + state->pipe_rst); + } + } +} + +void al_serdes_mode_set_sgmii( + struct al_serdes_obj *obj, + enum al_serdes_group grp) +{ + struct al_serdes_group_info *grp_info; + struct al_serdes_mode_rx_tx_inv_state rx_tx_inv_state; + + al_assert(obj); + al_assert(((int)grp) >= AL_SRDS_GRP_A); + al_assert(((int)grp) <= AL_SRDS_GRP_D); + + grp_info = &obj->grp_info[grp]; + + al_serdes_mode_rx_tx_inv_state_save(grp_info, &rx_tx_inv_state); + + al_reg_write32(&grp_info->regs_base->gen.irst, 0x000000); + al_reg_write32(&grp_info->regs_base->lane[0].ictl_multi, 0x10110010); + al_reg_write32(&grp_info->regs_base->lane[1].ictl_multi, 0x10110010); + al_reg_write32(&grp_info->regs_base->lane[2].ictl_multi, 0x10110010); + al_reg_write32(&grp_info->regs_base->lane[3].ictl_multi, 0x10110010); + al_reg_write32(&grp_info->regs_base->gen.ipd_multi_synth , 0x0001); + al_reg_write32(&grp_info->regs_base->lane[0].ipd_multi, 0x0003); + al_reg_write32(&grp_info->regs_base->lane[1].ipd_multi, 0x0003); + al_reg_write32(&grp_info->regs_base->lane[2].ipd_multi, 0x0003); + al_reg_write32(&grp_info->regs_base->lane[3].ipd_multi, 0x0003); + al_reg_write32(&grp_info->regs_base->gen.ictl_pcs , 0); + al_reg_write32(&grp_info->regs_base->gen.irst, 0x001000); + al_serdes_ns_delay(800); + al_reg_write32(&grp_info->regs_base->gen.irst, 0x000000); + al_serdes_ns_delay(500); + al_reg_write32(&grp_info->regs_base->gen.irst, 0x001000); + al_serdes_ns_delay(500); + + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 101, 183); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 102, 183); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 103, 12); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 104, 12); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 105, 26); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 106, 26); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 107, 2); 
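+	/*
+	 * The raw page/register/value writes in this sequence are taken
+	 * verbatim from the vendor's SGMII PMA initialization tables; the
+	 * individual values are not documented in this file.
+	 */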
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 108, 2); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 109, 17); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 110, 13); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 101, 153); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 102, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 103, 108); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 104, 183); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 105, 183); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 106, 12); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 107, 12); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 108, 26); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 109, 26); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 110, 7); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 111, 12); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 112, 8); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 113, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 114, 8); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 115, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 116, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 117, 179); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 118, 246); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 119, 208); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 120, 239); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 121, 251); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 122, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 123, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 124, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 125, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 126, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 127, 211); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 128, 211); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 129, 226); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 130, 239); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 131, 251); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 132, 251); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 133, 255); + 
al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 134, 239); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 135, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 136, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 137, 211); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 138, 211); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 139, 226); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 140, 239); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 141, 251); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 142, 251); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 143, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 144, 239); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 145, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 146, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 147, 251); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 148, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 149, 63); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 150, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 151, 100); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 152, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 153, 4); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 154, 2); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 155, 5); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 156, 5); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 157, 4); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 158, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 159, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 160, 8); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 161, 4); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 162, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 163, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 164, 4); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0_LANE_0, + AL_SRDS_REG_TYPE_PMA, 7, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_1_LANE_1, + AL_SRDS_REG_TYPE_PMA, 7, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_2_LANE_2, + AL_SRDS_REG_TYPE_PMA, 7, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_3_LANE_3, + AL_SRDS_REG_TYPE_PMA, 7, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 13, 16); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + 
AL_SRDS_REG_TYPE_PMA, 48, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 49, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 54, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 55, 180); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 93, 2); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 165, 3); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 41, 6); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 354, 3); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 355, 58); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 356, 9); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 357, 3); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 358, 62); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 359, 12); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 701, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 87, 0x1f); + + al_serdes_common_cfg_eth(grp_info); + + al_serdes_mode_rx_tx_inv_state_restore(grp_info, &rx_tx_inv_state); + + al_reg_write32(&grp_info->regs_base->gen.irst, 0x0011F0); + al_serdes_ns_delay(500); +} + +void al_serdes_mode_set_kr( + struct al_serdes_obj *obj, + enum al_serdes_group grp) +{ + struct al_serdes_group_info *grp_info; + struct al_serdes_mode_rx_tx_inv_state rx_tx_inv_state; + + al_assert(obj); + al_assert(((int)grp) >= AL_SRDS_GRP_A); + al_assert(((int)grp) <= AL_SRDS_GRP_D); + + grp_info = &obj->grp_info[grp]; + + al_serdes_mode_rx_tx_inv_state_save(grp_info, &rx_tx_inv_state); + + al_reg_write32(&grp_info->regs_base->gen.irst, 0x000000); + al_reg_write32(&grp_info->regs_base->lane[0].ictl_multi, 0x30330030); + al_reg_write32(&grp_info->regs_base->lane[1].ictl_multi, 0x30330030); + al_reg_write32(&grp_info->regs_base->lane[2].ictl_multi, 0x30330030); + al_reg_write32(&grp_info->regs_base->lane[3].ictl_multi, 0x30330030); + al_reg_write32(&grp_info->regs_base->gen.ipd_multi_synth , 0x0001); + al_reg_write32(&grp_info->regs_base->lane[0].ipd_multi, 0x0003); + al_reg_write32(&grp_info->regs_base->lane[1].ipd_multi, 0x0003); + al_reg_write32(&grp_info->regs_base->lane[2].ipd_multi, 0x0003); + al_reg_write32(&grp_info->regs_base->lane[3].ipd_multi, 0x0003); + al_reg_write32(&grp_info->regs_base->gen.ictl_pcs , 0); + al_reg_write32(&grp_info->regs_base->gen.irst, 0x001000); + al_serdes_ns_delay(800); + al_reg_write32(&grp_info->regs_base->gen.irst, 0x000000); + al_serdes_ns_delay(500); + al_reg_write32(&grp_info->regs_base->gen.irst, 0x001000); + al_serdes_ns_delay(500); + + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 101, 189); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 102, 189); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 103, 6); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 104, 6); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 105, 27); + 
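/*
+	 * As in the SGMII variant above, the raw writes in this sequence
+	 * appear to follow the vendor 10G (KR) initialization tables; note
+	 * that register 55 deviates from the original value (see the
+	 * "Was 182" remark further down).
+	 */
+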
al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 106, 27); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 107, 1); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 108, 1); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 109, 119); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 110, 5); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 101, 170); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 102, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 103, 108); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 104, 189); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 105, 189); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 106, 6); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 107, 6); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 108, 27); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 109, 27); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 110, 7); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 111, 12); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 112, 16); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 113, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 114, 16); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 115, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 116, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 117, 179); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 118, 246); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 119, 208); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 120, 239); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 121, 251); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 122, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 123, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 124, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 125, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 126, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 127, 211); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 128, 211); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 129, 226); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 130, 239); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 131, 251); + 
al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 132, 251); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 133, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 134, 239); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 135, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 136, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 137, 211); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 138, 211); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 139, 226); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 140, 239); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 141, 251); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 142, 251); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 143, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 144, 239); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 145, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 146, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 147, 251); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 148, 255); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 149, 63); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 150, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 151, 50); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 152, 17); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 153, 2); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 154, 1); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 155, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 156, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 157, 4); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 158, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 159, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 160, 8); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 161, 4); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 162, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 163, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 164, 4); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0_LANE_0, + AL_SRDS_REG_TYPE_PMA, 7, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_1_LANE_1, + AL_SRDS_REG_TYPE_PMA, 7, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_2_LANE_2, + AL_SRDS_REG_TYPE_PMA, 7, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_3_LANE_3, + 
AL_SRDS_REG_TYPE_PMA, 7, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 13, 16); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 48, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 49, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 54, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 55, 149); /*Was 182*/ + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 93, 2); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 165, 3); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 41, 6); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 354, 3); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 355, 58); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 356, 9); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 357, 3); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 358, 62); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, 359, 12); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 701, 0); + al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123, + AL_SRDS_REG_TYPE_PMA, 87, 0x1f); + + al_serdes_common_cfg_eth(grp_info); + + al_serdes_mode_rx_tx_inv_state_restore(grp_info, &rx_tx_inv_state); + + al_reg_write32(&grp_info->regs_base->gen.irst, 0x0011F0); + al_serdes_ns_delay(500); +} + +void al_serdes_rx_advanced_params_get(struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + struct al_serdes_adv_rx_params* rx_params) +{ + uint8_t temp_val; + + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_RX_CALEQ_1_REG_NUM, + &temp_val); + rx_params->dcgain = (temp_val & SERDES_IREG_RX_CALEQ_1_DCGAIN_MASK) >> + SERDES_IREG_RX_CALEQ_1_DCGAIN_SHIFT; + rx_params->dfe_3db_freq = (temp_val & + SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_MASK) >> + SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_SHIFT; + + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_RX_CALEQ_2_REG_NUM, + &temp_val); + rx_params->dfe_gain = (temp_val & + SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_MASK) >> + SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_SHIFT; + rx_params->dfe_first_tap_ctrl = (temp_val & + SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_MASK) >> + SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_SHIFT; + + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_RX_CALEQ_3_REG_NUM, + &temp_val); + rx_params->dfe_secound_tap_ctrl = (temp_val & + SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_MASK) >> + SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_SHIFT; + rx_params->dfe_third_tap_ctrl = (temp_val & + SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_MASK) >> + SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_SHIFT; + + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_RX_CALEQ_4_REG_NUM, + &temp_val); + rx_params->dfe_fourth_tap_ctrl = (temp_val & + SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_MASK) >> + SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_SHIFT; + rx_params->low_freq_agc_gain = (temp_val & + SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_MASK) >> + SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_SHIFT; + + al_serdes_reg_read( + obj, grp, lane, 
AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_RX_CALEQ_5_REG_NUM,
+			&temp_val);
+	rx_params->precal_code_sel = (temp_val &
+				SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_MASK) >>
+				SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_SHIFT;
+	rx_params->high_freq_agc_boost = (temp_val &
+				SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_MASK) >>
+				SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_SHIFT;
+
+	al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN_REG_NUM,
+			&temp_val);
+	rx_params->override = ((temp_val & SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN) == 0);
+}
+
+#if ( SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN_REG_NUM != \
+	SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM || \
+	SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN_REG_NUM != \
+	SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM)
+#error Wrong assumption
+#endif
+int al_serdes_rx_equalization(
+		struct al_serdes_obj *obj,
+		enum al_serdes_group grp,
+		enum al_serdes_lane lane)
+{
+	uint8_t serdes_ireg_fld_rxcalroamyadjust_locwren_val;
+	uint8_t serdes_ireg_fld_rxroam_xorbitsel_val;
+	uint8_t serdes_ireg_fld_pcsrxeq_locwren_val;
+	uint8_t serdes_ireg_fld_rxcal_locwren_val;
+	uint8_t temp_val;
+	uint8_t done;
+
+	int test_score;
+	int i;
+
+	/*
+	 * Make sure the Roam Eye mechanism is not overridden:
+	 * Lane SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN = 1,
+	 * so the Rx 4-point eye process is not overridden.
+	 * Lane SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN = 1,
+	 * so the Eye Roam latch is not overridden.
+	 * Lane SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN = 1,
+	 * so the Eye Roam latch 'X adjust' is not overridden.
+	 * Lane SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN = 1,
+	 * so the Eye Roam latch 'Y adjust' is not overridden.
+	 * Lane SERDES_IREG_FLD_RXROAM_XORBITSEL = 0/1,
+	 * so the Eye Roam latch works on the right eye position (XORBITSEL).
+	 * For most cases 0 is needed, but sometimes 1 is needed; the reason
+	 * is not fully understood, which is why the reference code exposed a
+	 * user-settable global XORBITSELmode variable (GUI, default 0).
+	 * Here the control must stay internal.
At the end we restore the original settings + */ + + /* save current values for restoring them at the end */ + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM, + &serdes_ireg_fld_rxcal_locwren_val); + + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM, + &serdes_ireg_fld_rxcalroamyadjust_locwren_val ); + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM, + &serdes_ireg_fld_rxroam_xorbitsel_val ); + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM, + &serdes_ireg_fld_pcsrxeq_locwren_val ); + + /* + * Set Bits: + * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN + * SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN + * SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN + * to return 4pt-RxEye and EyeRoam Latch to internal logic + * + * clear bit SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN + * AGC/DFE controlled via PMA registers + */ + temp_val = serdes_ireg_fld_rxcal_locwren_val; + temp_val |= SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN; + temp_val |= SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN; + temp_val |= SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN; + temp_val |= SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN; + + al_serdes_reg_write( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM, + temp_val ); + + /* + * Set bit SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN + * to return EyeRoam Latch Y to internal logic + */ + temp_val = serdes_ireg_fld_rxcalroamyadjust_locwren_val | + SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN; + al_serdes_reg_write( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM, + temp_val ); + + /* + * Clear Bit: SERDES_IREG_FLD_RXROAM_XORBITSEL + * so XORBITSEL=0, needed for the Eye mapping. + */ + temp_val = serdes_ireg_fld_rxroam_xorbitsel_val & + ~SERDES_IREG_FLD_RXROAM_XORBITSEL; + al_serdes_reg_write( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM, + temp_val ); + + /* + * Take control over the RxEQ process from the internal pin. + * Clear Bit SERDES_IREG_FLD_PCSRXEQ_LOCWREN + * to override RxEQ via PMA + */ + temp_val = serdes_ireg_fld_pcsrxeq_locwren_val & + ~SERDES_IREG_FLD_PCSRXEQ_LOCWREN; + al_serdes_reg_write( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM, + temp_val ); + + + /* + * Start/Stop RxEQ Cal is via PCSRXEQ_START: 1=START. 0=STOP. + * Clear Bit SERDES_IREG_FLD_PCSRXEQ_START + * to start fresh from Stop + */ + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM, + &temp_val ); + temp_val &= ~SERDES_IREG_FLD_PCSRXEQ_START; + al_serdes_reg_write( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM, + temp_val ); + + /* Set Bit SERDES_IREG_FLD_PCSRXEQ_START + * to begin Rx Eq Cal */ + temp_val |= SERDES_IREG_FLD_PCSRXEQ_START; + al_serdes_reg_write( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM, + temp_val ); + + /* Poll on RxEq Cal completion. SERDES_IREG_FLD_RXEQ_DONE. 1=Done.
*/ + for( i = 0; i < AL_SERDES_RX_EQUAL_TRIES; ++i ) { + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMEYEMEASDONE_REG_NUM, + &done ); + done &= SERDES_IREG_FLD_RXEQ_DONE; + + /* Check if RxEQ Cal is done */ + if (done) + break; + al_msleep(AL_SERDES_RX_EQUAL_MDELAY); + } + + if (!done) { + al_err("%s: Timeout!\n", __func__); + return -1; + } + + /* Stop the RxEQ process. */ + temp_val &= ~SERDES_IREG_FLD_PCSRXEQ_START; + al_serdes_reg_write( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM, + temp_val ); + /* Get score */ + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_RXEQ_BEST_EYE_MSB_VAL_REG_NUM, + &temp_val ); + test_score = (int)( (temp_val & 0xFF) << 6 ); + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_REG_NUM, + &temp_val ); + test_score += (int)(temp_val & SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_MASK); + + /* Restore start values */ + al_serdes_reg_write( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM, + serdes_ireg_fld_rxcal_locwren_val); + al_serdes_reg_write( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM, + serdes_ireg_fld_rxcalroamyadjust_locwren_val ); + al_serdes_reg_write( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM, + serdes_ireg_fld_rxroam_xorbitsel_val ); + al_serdes_reg_write( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM, + serdes_ireg_fld_pcsrxeq_locwren_val ); + + return test_score; +} + +#if ( SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM != \ + SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM || \ + SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM != \ + SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM || \ + SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM != \ + SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN_REG_NUM) +#error Wrong assumption +#endif +int al_serdes_calc_eye_size( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + int* width, + int* height) +{ + uint8_t rxcaleyediagfsm_x_y_valweight_val; + uint8_t rxcaleyediagfsm_xvalcoarse_val; + uint8_t rxcaleyediagfsm_xvalfine_val; + uint8_t rxcaleyediagfsm_yvalcoarse_val; + uint8_t rxcaleyediagfsm_yvalfine_val; + uint8_t rxlock2ref_locwren_val; + uint8_t rxcal_locwren_val; + uint8_t rxcalroamyadjust_locwren_val; + uint8_t rxlock2ref_ovren_val; + + int i; + uint8_t status; + uint8_t reg_value; + + /* Save Registers */ + al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXLOCK2REF_LOCWREN_REG_NUM, + &rxlock2ref_locwren_val); + al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM, + &rxcal_locwren_val); + al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM, + &rxcalroamyadjust_locwren_val); + al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXLOCK2REF_OVREN_REG_NUM, + &rxlock2ref_ovren_val); + + al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM, + &rxcaleyediagfsm_x_y_valweight_val); + al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM, + &rxcaleyediagfsm_xvalcoarse_val); + al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM, + 
&rxcaleyediagfsm_xvalfine_val); + al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM, + &rxcaleyediagfsm_yvalcoarse_val); + al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM, + &rxcaleyediagfsm_yvalfine_val); + + /* + * Clear Bit: + * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN + * to override RxEQ via PMA + * Set Bits: + * SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN, + * SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN + * to keep Eye Diag Roam controlled internally + */ + al_serdes_grp_reg_masked_write(&obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM, + SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN | + SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN | + SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN, + SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN | + SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN); + /* + * Set Bit: + * SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN + * to keep Eye Diag Roam controlled internally + */ + al_serdes_grp_reg_masked_write(&obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM, + SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN, + SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN); + + /* + * Clear Bit: + * SERDES_IREG_FLD_RXROAM_XORBITSEL, + * so XORBITSEL=0, needed for the Eye mapping + * Set Bit: + * SERDES_IREG_FLD_RXLOCK2REF_OVREN, + * so RXLOCK2REF_OVREN=1, keeping lock to data, preventing data hit + */ + al_serdes_grp_reg_masked_write(&obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM, + SERDES_IREG_FLD_RXLOCK2REF_OVREN | + SERDES_IREG_FLD_RXROAM_XORBITSEL, + SERDES_IREG_FLD_RXLOCK2REF_OVREN); + + + /* + * Clear Bit: + * SERDES_IREG_FLD_RXLOCK2REF_LOCWREN, + * so RXLOCK2REF_LOCWREN=0, to override control + */ + al_serdes_grp_reg_masked_write(&obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXLOCK2REF_LOCWREN_REG_NUM, + SERDES_IREG_FLD_RXLOCK2REF_LOCWREN, + 0); + + /* Width Calculation */ + + /* Return Value = 0*Y + 1*X */ + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM, + 0x01); + /* X coarse scan step = 3 */ + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM, + 0x03); + /* X fine scan step = 1 */ + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM, + 0x01); + /* Y coarse scan step = 0 */ + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM, + 0x00); + /* Y fine scan step = 0 */ + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM, + 0x00); + + /* + * Set Bit: + * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START, + * to start Eye measurement + */ + al_serdes_grp_reg_masked_write(&obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM, + SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START, + SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START); + + for( i = 0; i < AL_SERDES_RX_EYE_CAL_TRIES; ++i ) { + /* Check if RxEQ Cal is done */ + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE_REG_NUM, + &status ); + if (status 
& SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE) + break; + al_msleep(AL_SERDES_RX_EYE_CAL_MDELAY); + } + + if (status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR) { + al_err("%s: eye measure error!\n", __func__); + return -1; + } + + if (!(status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE)) { + al_err("%s: eye measure timeout!\n", __func__); + return -1; + } + + /* Read Eye Opening Metrics, Bits: + * SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB, + * SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB + */ + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_REG_NUM, + &reg_value ); + *width = reg_value << 6; + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_REG_NUM, + &reg_value ); + *width += reg_value & SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_MAKE; + + /* + * Clear Bit: + * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START, + * to stop Eye measurement + */ + al_serdes_grp_reg_masked_write(&obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM, + SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START, + 0); + + /* Height Calculation */ + + /* Return Value = 1*Y + 0*X */ + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM, + 0x10); + /* X coarse scan step = 0 */ + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM, + 0x00); + /* X fine scan step = 0 */ + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM, + 0x00); + /* Y coarse scan step = 3 */ + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM, + 0x03); + /* Y fine scan step = 1 */ + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM, + 0x01); + + /* + * Set Bit: + * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START, + * to start Eye measurement + */ + al_serdes_grp_reg_masked_write(&obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM, + SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START, + SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START); + + for( i = 0; i < AL_SERDES_RX_EYE_CAL_TRIES; ++i ) { + /* Check if RxEQ Cal is done */ + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE_REG_NUM, + &status ); + if (status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE) + break; + al_msleep(AL_SERDES_RX_EYE_CAL_MDELAY); + } + + if (status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR) { + al_err("%s: eye measure error!\n", __func__); + return -1; + } + + if (!(status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE)) { + al_err("%s: eye measure timeout!\n", __func__); + return -1; + } + + /* Read Eye Opening Metrics, Bits: + * SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB, + * SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB + */ + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_REG_NUM, + &reg_value ); + *height = reg_value << 6; + al_serdes_reg_read( + obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_REG_NUM, + &reg_value ); + *height += reg_value & SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_MAKE; + + /* + * Clear Bit: + * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START, + * to stop Eye measurement + */ +
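/*
 * Illustrative sketch (not part of the patch): the width/height metrics
 * read above come as an MSB/LSB register pair; the MSB supplies the
 * upper bits shifted left by 6, and the LSB a masked low part,
 * presumably a 6-bit field given that shift. The constants below are
 * hypothetical sample values, and 0x3f stands in for the real
 * SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_MAKE mask.
 */
#if 0 /* illustrative only */
	uint8_t msb_val = 0x12;		/* e.g. from the EYESUM MSB register */
	uint8_t lsb_val = 0x2a;		/* e.g. from the EYESUM LSB register */
	int metric;

	metric = msb_val << 6;		/* upper bits: 0x480 */
	metric += lsb_val & 0x3f;	/* low 6 bits: metric == 0x4aa */
#endif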
al_serdes_grp_reg_masked_write(&obj->grp_info[grp], + lane, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM, + SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START, + 0); + + /* Restore Registers */ + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM, + rxcaleyediagfsm_x_y_valweight_val); + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM, + rxcaleyediagfsm_xvalcoarse_val); + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM, + rxcaleyediagfsm_xvalfine_val); + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM, + rxcaleyediagfsm_yvalcoarse_val); + al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM, + rxcaleyediagfsm_yvalfine_val); + + al_serdes_reg_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXLOCK2REF_LOCWREN_REG_NUM, + rxlock2ref_locwren_val); + al_serdes_reg_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM, + rxcal_locwren_val); + al_serdes_reg_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM, + rxcalroamyadjust_locwren_val); + al_serdes_reg_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXLOCK2REF_OVREN_REG_NUM, + rxlock2ref_ovren_val); + return 0; +} diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_ssm.c b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_ssm.c new file mode 100644 index 00000000000000..45e018c6cfac83 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_ssm.c @@ -0,0 +1,218 @@ +/******************************************************************************* +Copyright (C) 2014 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @{ + * @file al_hal_ssm.c + * + */ + +#include + +/** + * Initialize acceleration DMA for RAID/Crypto usage + * + * @param ssm_dma ssm dma handle + * @param params parameters from upper layer + * + * @return 0 on success. + */ +int al_ssm_dma_init( + struct al_ssm_dma *ssm_dma, + struct al_ssm_dma_params *params) +{ + struct al_m2m_udma_params m2m_params; + struct unit_regs __iomem *unit_regs; + int rc; + + al_dbg("ssm [%s]: Initialize unit\n", params->name); + + ssm_dma->dev_id = params->dev_id; + ssm_dma->rev_id = params->rev_id; + + m2m_params.name = params->name; + unit_regs = (struct unit_regs __iomem *)params->udma_regs_base; + m2m_params.m2s_regs_base = &unit_regs->m2s; + m2m_params.s2m_regs_base = &unit_regs->s2m; + m2m_params.num_of_queues = params->num_of_queues; + m2m_params.max_m2s_descs_per_pkt = AL_SSM_MAX_SRC_DESCS; + m2m_params.max_s2m_descs_per_pkt = AL_SSM_MAX_DST_DESCS; + + /* initialize the udma */ + rc = al_m2m_udma_init(&ssm_dma->m2m_udma, &m2m_params); + if (rc != 0) + al_err("failed to initialize udma, error %d\n", rc); + return rc; +} + +/** + * Initialize the m2s(tx) and s2m(rx) components of the queue + * + * @param ssm_dma ssm dma handle + * @param qid queue index + * @param tx_params TX UDMA params + * @param rx_params RX UDMA params + * @param q_type indicate q type (crc/csum/memcpy, crypto, raid) + * + * @return 0 if no error found. 
+ * -EINVAL if the qid is out of range + * -EIO if queue was already initialized + */ +int al_ssm_dma_q_init(struct al_ssm_dma *ssm_dma, + uint32_t qid, + struct al_udma_q_params *tx_params, + struct al_udma_q_params *rx_params, + enum al_ssm_q_type q_type) +{ + int rc; + + al_dbg("ssm [%s]: Initialize queue %d\n", + ssm_dma->m2m_udma.name, qid); + + tx_params->dev_id = ssm_dma->dev_id; + tx_params->rev_id = ssm_dma->rev_id; + rx_params->dev_id = ssm_dma->dev_id; + rx_params->rev_id = ssm_dma->rev_id; + + rc = al_m2m_udma_q_init(&ssm_dma->m2m_udma, qid, tx_params, rx_params); + if (rc != 0) + al_err("ssm [%s]: failed to initialize tx q %d, error %d\n", + ssm_dma->m2m_udma.name, qid, rc); + else + ssm_dma->q_types[qid] = q_type; + + return rc; +} + +/** + * Change the DMA state + * + * @param ssm_dma ssm DMA handle + * @param dma_state the new state + * + * @return 0 + */ +int al_ssm_dma_state_set( + struct al_ssm_dma *ssm_dma, + enum al_udma_state dma_state) +{ + int rc; + + rc = al_m2m_udma_state_set(&ssm_dma->m2m_udma, dma_state); + if (rc != 0) + al_err("ssm [%s]: failed to change state, error %d\n", + ssm_dma->m2m_udma.name, rc); + return rc; +} + +/** + * Get udma handle of the tx or rx udma, this handle can be used to call misc + * configuration functions defined at al_udma_config.h + * + * @param ssm_dma ssm DMA handle + * @param type tx or rx udma + * @param udma the requested udma handle written to this pointer + * + * @return 0 + */ +int al_ssm_dma_handle_get( + struct al_ssm_dma *ssm_dma, + enum al_udma_type type, + struct al_udma **udma) +{ + return al_m2m_udma_handle_get(&ssm_dma->m2m_udma, type, udma); +} + +/****************************************************************************** + ******************************************************************************/ +struct al_udma *al_ssm_dma_tx_udma_handle_get( + struct al_ssm_dma *ssm_dma) +{ + struct al_udma *udma; + int err; + + err = al_m2m_udma_handle_get(&ssm_dma->m2m_udma, UDMA_TX, &udma); + if (err) + return NULL; + + return udma; +} + +/****************************************************************************** + ******************************************************************************/ +struct al_udma_q *al_ssm_dma_tx_queue_handle_get( + struct al_ssm_dma *ssm_dma, + unsigned int qid) +{ + struct al_udma *udma; + int err; + + err = al_m2m_udma_handle_get(&ssm_dma->m2m_udma, UDMA_TX, &udma); + if (err) + return NULL; + + return &udma->udma_q[qid]; +} + +/****************************************************************************** + ******************************************************************************/ +struct al_udma *al_ssm_dma_rx_udma_handle_get( + struct al_ssm_dma *ssm_dma) +{ + struct al_udma *udma; + int err; + + err = al_m2m_udma_handle_get(&ssm_dma->m2m_udma, UDMA_RX, &udma); + if (err) + return NULL; + + return udma; +} + +/****************************************************************************** + ******************************************************************************/ +struct al_udma_q *al_ssm_dma_rx_queue_handle_get( + struct al_ssm_dma *ssm_dma, + unsigned int qid) +{ + struct al_udma *udma; + int err; + + err = al_m2m_udma_handle_get(&ssm_dma->m2m_udma, UDMA_RX, &udma); + if (err) + return NULL; + + return &udma->udma_q[qid]; +} diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_config.c b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_config.c new file mode 100644 index 00000000000000..9fc9697d926f48 --- /dev/null +++ 
b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_config.c @@ -0,0 +1,1314 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*******************************************************************************/ + +/** + * @file al_hal_udma_config.c + * + * @brief Universal DMA HAL driver for configurations + * + */ + +#include +#include +#include + +/**************** Misc configurations *********************/ +/** Configure AXI generic configuration */ +int al_udma_axi_set(struct udma_gen_axi *axi_regs, + struct al_udma_axi_conf *axi) +{ + uint32_t reg; + + al_reg_write32(&axi_regs->cfg_1, axi->axi_timeout); + + reg = al_reg_read32(&axi_regs->cfg_2); + reg &= ~UDMA_GEN_AXI_CFG_2_ARB_PROMOTION_MASK; + reg |= axi->arb_promotion; + al_reg_write32(&axi_regs->cfg_2, reg); + + reg = al_reg_read32(&axi_regs->endian_cfg); + if (axi->swap_8_bytes == AL_TRUE) + reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_64B_EN; + else + reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_64B_EN; + + if (axi->swap_s2m_data == AL_TRUE) + reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DATA; + else + reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DATA; + + if (axi->swap_s2m_desc == AL_TRUE) + reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DESC; + else + reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DESC; + + if (axi->swap_m2s_data == AL_TRUE) + reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DATA; + else + reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DATA; + + if (axi->swap_m2s_desc == AL_TRUE) + reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DESC; + else + reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DESC; + + al_reg_write32(&axi_regs->endian_cfg, reg); + return 0; +} + +/* Configure UDMA AXI M2S configuration */ +/** Configure AXI M2S submaster */ +static int al_udma_m2s_axi_sm_set(struct al_udma_axi_submaster *m2s_sm, + uint32_t *cfg_1, uint32_t *cfg_2, + uint32_t *cfg_max_beats) +{ + uint32_t reg; + reg = al_reg_read32(cfg_1); + reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_1_AWID_MASK; + reg |= m2s_sm->id & UDMA_AXI_M2S_COMP_WR_CFG_1_AWID_MASK; + reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_MASK; + reg |= (m2s_sm->cache_type << + UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_SHIFT) & + UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_MASK; + reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_MASK; + reg |= (m2s_sm->burst << UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_SHIFT) & + UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_MASK; + al_reg_write32(cfg_1, reg); + + reg = al_reg_read32(cfg_2); + reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWUSER_MASK; + reg |= m2s_sm->used_ext & UDMA_AXI_M2S_COMP_WR_CFG_2_AWUSER_MASK; + reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_MASK; + reg |= (m2s_sm->bus_size << + UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_SHIFT) & + UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_MASK; + reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_MASK; + reg |= (m2s_sm->qos << UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_SHIFT) & + UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_MASK; + reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_MASK; + reg |= (m2s_sm->prot << UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_SHIFT) & + UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_MASK; + al_reg_write32(cfg_2, reg); + + reg = al_reg_read32(cfg_max_beats); + reg &= ~UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK; + reg |= m2s_sm->max_beats & + UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK; + al_reg_write32(cfg_max_beats, reg); + + return 0; +} + +/** Configure UDMA AXI M2S configuration */ +int al_udma_m2s_axi_set(struct al_udma *udma, + struct al_udma_m2s_axi_conf *axi_m2s) +{ + uint32_t reg; + + al_udma_m2s_axi_sm_set(&axi_m2s->comp_write, + &udma->udma_regs->m2s.axi_m2s.comp_wr_cfg_1, + &udma->udma_regs->m2s.axi_m2s.comp_wr_cfg_2, + &udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1); + + al_udma_m2s_axi_sm_set(&axi_m2s->data_read, + &udma->udma_regs->m2s.axi_m2s.data_rd_cfg_1, + 
&udma->udma_regs->m2s.axi_m2s.data_rd_cfg_2, + &udma->udma_regs->m2s.axi_m2s.data_rd_cfg); + + al_udma_m2s_axi_sm_set(&axi_m2s->desc_read, + &udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_1, + &udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_2, + &udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_3); + + reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.data_rd_cfg); + if (axi_m2s->break_on_max_boundary == AL_TRUE) + reg |= UDMA_AXI_M2S_DATA_RD_CFG_ALWAYS_BREAK_ON_MAX_BOUDRY; + else + reg &= ~UDMA_AXI_M2S_DATA_RD_CFG_ALWAYS_BREAK_ON_MAX_BOUDRY; + al_reg_write32(&udma->udma_regs->m2s.axi_m2s.data_rd_cfg, reg); + + reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1); + reg &= ~UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK; + reg |= (axi_m2s->min_axi_beats << + UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT) & + UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK; + al_reg_write32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1, reg); + + reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.ostand_cfg); + reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_DATA_RD_MASK; + reg |= axi_m2s->ostand_max_data_read & + UDMA_AXI_M2S_OSTAND_CFG_MAX_DATA_RD_MASK; + reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_MASK; + reg |= (axi_m2s->ostand_max_desc_read << + UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_SHIFT) & + UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_MASK; + reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_MASK; + reg |= (axi_m2s->ostand_max_comp_req << + UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_SHIFT) & + UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_MASK; + reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_MASK; + reg |= (axi_m2s->ostand_max_comp_write << + UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_SHIFT) & + UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_MASK; + al_reg_write32(&udma->udma_regs->m2s.axi_m2s.ostand_cfg, reg); + return 0; +} + +/** Configure AXI S2M submaster */ +static int al_udma_s2m_axi_sm_set(struct al_udma_axi_submaster *s2m_sm, + uint32_t *cfg_1, uint32_t *cfg_2, + uint32_t *cfg_max_beats) +{ + uint32_t reg; + reg = al_reg_read32(cfg_1); + reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_MASK; + reg |= s2m_sm->id & UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_MASK; + reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_MASK; + reg |= (s2m_sm->cache_type << + UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_SHIFT) & + UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_MASK; + reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_MASK; + reg |= (s2m_sm->burst << UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_SHIFT) & + UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_MASK; + al_reg_write32(cfg_1, reg); + + reg = al_reg_read32(cfg_2); + reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_MASK; + reg |= s2m_sm->used_ext & UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_MASK; + reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_MASK; + reg |= (s2m_sm->bus_size << UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_SHIFT) & + UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_MASK; + reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_MASK; + reg |= (s2m_sm->qos << UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_SHIFT) & + UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_MASK; + reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_MASK; + reg |= (s2m_sm->prot << UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_SHIFT) & + UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_MASK; + al_reg_write32(cfg_2, reg); + + reg = al_reg_read32(cfg_max_beats); + reg &= ~UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK; + reg |= s2m_sm->max_beats & + UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK; + al_reg_write32(cfg_max_beats, reg); + + return 0; +} + +/** Configure UDMA AXI S2M configuration */ +int al_udma_s2m_axi_set(struct al_udma *udma, + struct al_udma_s2m_axi_conf *axi_s2m) +{ + + uint32_t reg; + + 
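/*
 * For reference, a self-contained sketch of the read-modify-write idiom
 * this file applies to every multi-bit register field: read the
 * register, clear the field through its mask, OR in the shifted new
 * value, then write the result back. FOO_MASK, FOO_SHIFT, reg_addr and
 * val are hypothetical stand-ins for the UDMA_*_MASK/_SHIFT pairs and
 * register pointers used below.
 */
#if 0 /* illustrative only */
	reg = al_reg_read32(reg_addr);		/* fetch the current value */
	reg &= ~FOO_MASK;			/* clear the target field */
	reg |= (val << FOO_SHIFT) & FOO_MASK;	/* insert the new value */
	al_reg_write32(reg_addr, reg);		/* write it back */
#endif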
al_udma_s2m_axi_sm_set(&axi_s2m->data_write, + &udma->udma_regs->s2m.axi_s2m.data_wr_cfg_1, + &udma->udma_regs->s2m.axi_s2m.data_wr_cfg_2, + &udma->udma_regs->s2m.axi_s2m.data_wr_cfg); + + al_udma_s2m_axi_sm_set(&axi_s2m->desc_read, + &udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_4, + &udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_5, + &udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_3); + + al_udma_s2m_axi_sm_set(&axi_s2m->comp_write, + &udma->udma_regs->s2m.axi_s2m.comp_wr_cfg_1, + &udma->udma_regs->s2m.axi_s2m.comp_wr_cfg_2, + &udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1); + + reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_3); + if (axi_s2m->break_on_max_boundary == AL_TRUE) + reg |= UDMA_AXI_S2M_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY; + else + reg &= ~UDMA_AXI_S2M_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY; + al_reg_write32(&udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_3, reg); + + reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1); + reg &= ~UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK; + reg |= (axi_s2m->min_axi_beats << + UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT) & + UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK; + al_reg_write32(&udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1, reg); + + reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_rd); + reg &= ~UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_MASK; + reg |= axi_s2m->ostand_max_desc_read & + UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_MASK; + + reg &= ~UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_MASK; + reg |= (axi_s2m->ack_fifo_depth << + UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_SHIFT) & + UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_MASK; + + al_reg_write32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_rd, reg); + + reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_wr); + reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_MASK; + reg |= axi_s2m->ostand_max_data_req & + UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_MASK; + reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_MASK; + reg |= (axi_s2m->ostand_max_data_write << + UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_SHIFT) & + UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_MASK; + reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_MASK; + reg |= (axi_s2m->ostand_max_comp_req << + UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_SHIFT) & + UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_MASK; + reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_MASK; + reg |= (axi_s2m->ostand_max_comp_write << + UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_SHIFT) & + UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_MASK; + al_reg_write32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_wr, reg); + return 0; +} + +/** M2S packet len configuration */ +int al_udma_m2s_packet_size_cfg_set(struct al_udma *udma, + struct al_udma_m2s_pkt_len_conf *conf) +{ + uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s.cfg_len); + uint32_t max_supported_size = UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_MASK; + + al_assert(udma->type == UDMA_TX); + + if (conf->encode_64k_as_zero == AL_TRUE) + max_supported_size += 1; /* 64K */ + + if (conf->max_pkt_size > max_supported_size) { + al_err("udma [%s]: requested max_pkt_size (0x%x) exceeds the " + "supported limit (0x%x)\n", udma->name, + conf->max_pkt_size, max_supported_size); + return -EINVAL; + } + + reg &= ~UDMA_M2S_CFG_LEN_ENCODE_64K; + if (conf->encode_64k_as_zero == AL_TRUE) + reg |= UDMA_M2S_CFG_LEN_ENCODE_64K; + else + reg &= ~UDMA_M2S_CFG_LEN_ENCODE_64K; + + reg &= ~UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_MASK; + reg |= conf->max_pkt_size;
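/*
 * Worked example of the limit check above (a sketch: the real field
 * width comes from UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_MASK; 0xffff below is
 * a hypothetical 16-bit mask). With a 16-bit length field the largest
 * directly representable packet is 65535 bytes; with encode_64k_as_zero
 * enabled the limit grows to 65536, since a length of exactly 64KB is
 * written to the hardware as 0.
 */
#if 0 /* illustrative only */
	uint32_t mask = 0xffff;		/* hypothetical MAX_PKT_SIZE mask */
	uint32_t limit = mask;		/* 65535 */

	if (conf->encode_64k_as_zero == AL_TRUE)
		limit += 1;		/* 65536: written as 0 */
#endif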
+ + al_reg_write32(&udma->udma_regs->m2s.m2s.cfg_len, reg); + return 0; +} + +/** Report Error - to be used for abort */ +void al_udma_err_report(struct al_udma *udma __attribute__((__unused__))) +{ + return; +} + +/** Statistics - TBD */ +void al_udma_stats_get(struct al_udma *udma __attribute__((__unused__))) +{ + return; +} + +/** Configure UDMA M2S descriptor prefetch */ +int al_udma_m2s_pref_set(struct al_udma *udma, + struct al_udma_m2s_desc_pref_conf *conf) +{ + uint32_t reg; + + reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_1); + reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK; + reg |= conf->desc_fifo_depth; + al_reg_write32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_1, reg); + + reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2); + + if (conf->sch_mode == SRR) + reg |= UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR; + else if (conf->sch_mode == STRICT) + reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR; + else { + al_err("udma [%s]: requested descriptor prefetch arbiter " + "mode (%d) is invalid\n", udma->name, conf->sch_mode); + return -EINVAL; + } + reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK; + reg |= conf->max_desc_per_packet & + UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK; + al_reg_write32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2, reg); + + reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3); + reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK; + reg |= conf->min_burst_below_thr & + UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK; + + reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK; + reg |= (conf->min_burst_above_thr << + UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT) & + UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK; + + reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK; + reg |= (conf->pref_thr << + UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) & + UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK; + + al_reg_write32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3, reg); + + reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.data_cfg); + reg &= ~UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK; + reg |= conf->data_fifo_depth & + UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK; + + reg &= ~UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_MASK; + reg |= (conf->max_pkt_limit + << UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_SHIFT) & + UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_MASK; + al_reg_write32(&udma->udma_regs->m2s.m2s_rd.data_cfg, reg); + + return 0; +} + +/** Get the M2S UDMA descriptor prefetch */ +int al_udma_m2s_pref_get(struct al_udma *udma, + struct al_udma_m2s_desc_pref_conf *conf) +{ + uint32_t reg; + + reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_1); + conf->desc_fifo_depth = + AL_REG_FIELD_GET(reg, UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK, + UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_SHIFT); + + reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2); + if (reg & UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR) + conf->sch_mode = SRR; + else + conf->sch_mode = STRICT; + conf->max_desc_per_packet = + AL_REG_FIELD_GET(reg, + UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK, + UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_SHIFT); + + reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3); + + conf->min_burst_below_thr = + AL_REG_FIELD_GET(reg, + UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK, + UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_SHIFT); + + conf->min_burst_above_thr = + AL_REG_FIELD_GET(reg, + UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK, +
UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT); + + conf->pref_thr = AL_REG_FIELD_GET(reg, + UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK, + UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT); + return 0; +} + +/* set max descriptors */ +int al_udma_m2s_max_descs_set(struct al_udma *udma, uint8_t max_descs) +{ + uint32_t pref_thr = max_descs; + uint32_t min_burst_above_thr = 4; + al_assert(max_descs <= AL_UDMA_M2S_MAX_ALLOWED_DESCS_PER_PACKET); + al_assert(max_descs > 0); + + /* increase min_burst_above_thr so larger burst can be used to fetch + * descriptors */ + if (pref_thr >= 8) + min_burst_above_thr = 8; + else { + /* don't set prefetch threshold too low so we can have the + * min_burst_above_thr >= 4 */ + pref_thr = 4; + } + + al_reg_write32_masked(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2, + UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK, + max_descs << UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_SHIFT); + + al_reg_write32_masked(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3, + UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK | + UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK, + (pref_thr << UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) | + (min_burst_above_thr << UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT)); + + return 0; +} + +/* set s2m max descriptors */ +int al_udma_s2m_max_descs_set(struct al_udma *udma, uint8_t max_descs) +{ + uint32_t pref_thr = max_descs; + uint32_t min_burst_above_thr = 4; + al_assert(max_descs <= AL_UDMA_S2M_MAX_ALLOWED_DESCS_PER_PACKET); + al_assert(max_descs > 0); + + /* increase min_burst_above_thr so larger burst can be used to fetch + * descriptors */ + if (pref_thr >= 8) + min_burst_above_thr = 8; + else + /* don't set prefetch threshold too low so we can have the + * min_burst_above_thr >= 4 */ + pref_thr = 4; + + al_reg_write32_masked(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_3, + UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK | + UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK, + (pref_thr << UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) | + (min_burst_above_thr << UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT)); + + return 0; +} + +int al_udma_s2m_full_line_write_set(struct al_udma *udma, al_bool enable) +{ + uint32_t val = 0; + + if (enable == AL_TRUE) { + val = UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE; + al_info("udma [%s]: full line write enabled\n", udma->name); + } + + al_reg_write32_masked(&udma->udma_regs->s2m.s2m_wr.data_cfg_2, + UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE, + val); + return 0; +} + +/** Configure S2M UDMA descriptor prefetch */ +int al_udma_s2m_pref_set(struct al_udma *udma, + struct al_udma_s2m_desc_pref_conf *conf) +{ + uint32_t reg; + + reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_1); + reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK; + reg |= conf->desc_fifo_depth; + al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_1, reg); + + reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_2); + + if (conf->sch_mode == SRR) + reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_PREF_FORCE_RR; + else if (conf->sch_mode == STRICT) + reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_PREF_FORCE_RR; + else { + al_err("udma [%s]: requested descriptor prefetch arbiter " + "mode (%d) is invalid\n", udma->name, conf->sch_mode); + return -EINVAL; + } + if (conf->q_promotion == AL_TRUE) + reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_Q_PROMOTION; + else + reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_Q_PROMOTION; + + if (conf->force_promotion == AL_TRUE) + reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_FORCE_PROMOTION; + else + reg &=
~UDMA_S2M_RD_DESC_PREF_CFG_2_FORCE_PROMOTION; + + if (conf->en_pref_prediction == AL_TRUE) + reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_EN_PREF_PREDICTION; + else + reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_EN_PREF_PREDICTION; + + reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_MASK; + reg |= (conf->promotion_th + << UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_SHIFT) & + UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_MASK; + + al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_2, reg); + + reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_3); + reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK; + reg |= (conf->pref_thr << UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) & + UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK; + + reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK; + reg |= conf->min_burst_below_thr & + UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK; + + reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK; + reg |= (conf->min_burst_above_thr << + UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT) & + UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK; + + al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_3, reg); + + reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_4); + reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_MASK; + reg |= conf->a_full_thr & UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_MASK; + al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_4, reg); + + + return 0; +} + +/* Configure S2M UDMA data write */ +int al_udma_s2m_data_write_set(struct al_udma *udma, + struct al_udma_s2m_data_write_conf *conf) +{ + uint32_t reg; + + reg = al_reg_read32(&udma->udma_regs->s2m.s2m_wr.data_cfg_1); + reg &= ~UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_MASK; + reg |= conf->data_fifo_depth & + UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_MASK; + reg &= ~UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_MASK; + reg |= (conf->max_pkt_limit << + UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_SHIFT) & + UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_MASK; + reg &= ~UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_MASK; + reg |= (conf->fifo_margin << + UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_SHIFT) & + UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_MASK; + al_reg_write32(&udma->udma_regs->s2m.s2m_wr.data_cfg_1, reg); + + reg = al_reg_read32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2); + reg &= ~UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK; + reg |= conf->desc_wait_timer & + UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK; + reg &= ~(UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC | + UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC | + UDMA_S2M_WR_DATA_CFG_2_WAIT_FOR_PREF | + UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE | + UDMA_S2M_WR_DATA_CFG_2_DIRECT_HDR_USE_BUF1); + reg |= conf->flags & + (UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC | + UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC | + UDMA_S2M_WR_DATA_CFG_2_WAIT_FOR_PREF | + UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE | + UDMA_S2M_WR_DATA_CFG_2_DIRECT_HDR_USE_BUF1); + al_reg_write32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2, reg); + + return 0; +} + +/* Configure S2M UDMA completion */ +int al_udma_s2m_completion_set(struct al_udma *udma, + struct al_udma_s2m_completion_conf *conf) +{ + uint32_t reg = al_reg_read32(&udma->udma_regs->s2m.s2m_comp.cfg_1c); + reg &= ~UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK; + reg |= conf->desc_size & UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK; + if (conf->cnt_words == AL_TRUE) + reg |= UDMA_S2M_COMP_CFG_1C_CNT_WORDS; + else + reg &= ~UDMA_S2M_COMP_CFG_1C_CNT_WORDS; + if (conf->q_promotion == AL_TRUE) + reg |= UDMA_S2M_COMP_CFG_1C_Q_PROMOTION; + else + reg &= ~UDMA_S2M_COMP_CFG_1C_Q_PROMOTION; + if
(conf->force_rr == AL_TRUE) + reg |= UDMA_S2M_COMP_CFG_1C_FORCE_RR; + else + reg &= ~UDMA_S2M_COMP_CFG_1C_FORCE_RR; + reg &= ~UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_MASK; + reg |= (conf->q_free_min << UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_SHIFT) & + UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_MASK; + al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_1c, reg); + + reg = al_reg_read32(&udma->udma_regs->s2m.s2m_comp.cfg_2c); + reg &= ~UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_MASK; + reg |= conf->comp_fifo_depth + & UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_MASK; + reg &= ~UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_MASK; + reg |= (conf->unack_fifo_depth + << UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_SHIFT) & + UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_MASK; + al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_2c, reg); + + al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_application_ack, + conf->timeout); + return 0; +} + +/** Configure the M2S UDMA scheduling mode */ +int al_udma_m2s_sc_set(struct al_udma *udma, + struct al_udma_m2s_dwrr_conf *sched) +{ + uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s_dwrr.cfg_sched); + + if (sched->enable_dwrr == AL_TRUE) + reg |= UDMA_M2S_DWRR_CFG_SCHED_EN_DWRR; + else + reg &= ~UDMA_M2S_DWRR_CFG_SCHED_EN_DWRR; + + if (sched->pkt_mode == AL_TRUE) + reg |= UDMA_M2S_DWRR_CFG_SCHED_PKT_MODE_EN; + else + reg &= ~UDMA_M2S_DWRR_CFG_SCHED_PKT_MODE_EN; + + reg &= ~UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_MASK; + reg |= sched->weight << UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_SHIFT; + reg &= ~UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_MASK; + reg |= sched->inc_factor << UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_SHIFT; + al_reg_write32(&udma->udma_regs->m2s.m2s_dwrr.cfg_sched, reg); + + reg = al_reg_read32(&udma->udma_regs->m2s.m2s_dwrr.ctrl_deficit_cnt); + reg &= ~UDMA_M2S_DWRR_CTRL_DEFICIT_CNT_INIT_MASK; + reg |= sched->deficit_init_val; + al_reg_write32(&udma->udma_regs->m2s.m2s_dwrr.ctrl_deficit_cnt, reg); + + return 0; +} + +/** Configure the M2S UDMA rate limitation */ +int al_udma_m2s_rlimit_set(struct al_udma *udma, + struct al_udma_m2s_rlimit_mode *mode) +{ + uint32_t reg = al_reg_read32( + &udma->udma_regs->m2s.m2s_rate_limiter.gen_cfg); + + if (mode->pkt_mode_en == AL_TRUE) + reg |= UDMA_M2S_RATE_LIMITER_GEN_CFG_PKT_MODE_EN; + else + reg &= ~UDMA_M2S_RATE_LIMITER_GEN_CFG_PKT_MODE_EN; + reg &= ~UDMA_M2S_RATE_LIMITER_GEN_CFG_SHORT_CYCLE_SIZE_MASK; + reg |= mode->short_cycle_sz & + UDMA_M2S_RATE_LIMITER_GEN_CFG_SHORT_CYCLE_SIZE_MASK; + al_reg_write32(&udma->udma_regs->m2s.m2s_rate_limiter.gen_cfg, reg); + + reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_token); + reg &= ~UDMA_M2S_RATE_LIMITER_CTRL_TOKEN_RST_MASK; + reg |= mode->token_init_val & + UDMA_M2S_RATE_LIMITER_CTRL_TOKEN_RST_MASK; + al_reg_write32(&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_token, reg); + + return 0; +} + +int al_udma_m2s_rlimit_reset(struct al_udma *udma) +{ + uint32_t reg = al_reg_read32( + &udma->udma_regs->m2s.m2s_rate_limiter.ctrl_cycle_cnt); + reg |= UDMA_M2S_RATE_LIMITER_CTRL_CYCLE_CNT_RST; + al_reg_write32(&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_cycle_cnt, + reg); + return 0; +} + +/** Configure the Stream/Q rate limitation */ +static int al_udma_common_rlimit_set(struct udma_rlimit_common *regs, + struct al_udma_m2s_rlimit_cfg *conf) +{ + uint32_t reg = al_reg_read32(&regs->cfg_1s); + /* mask max burst size, and enable/pause control bits */ + reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_MAX_BURST_SIZE_MASK; + reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_EN; + reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_PAUSE; + reg |=
conf->max_burst_sz & + UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_MAX_BURST_SIZE_MASK; + al_reg_write32(&regs->cfg_1s, reg); + + reg = al_reg_read32(&regs->cfg_cycle); + reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_CYCLE_LONG_CYCLE_SIZE_MASK; + reg |= conf->long_cycle_sz & + UDMA_M2S_STREAM_RATE_LIMITER_CFG_CYCLE_LONG_CYCLE_SIZE_MASK; + al_reg_write32(&regs->cfg_cycle, reg); + + reg = al_reg_read32(&regs->cfg_token_size_1); + reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_1_LONG_CYCLE_MASK; + reg |= conf->long_cycle & + UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_1_LONG_CYCLE_MASK; + al_reg_write32(&regs->cfg_token_size_1, reg); + + reg = al_reg_read32(&regs->cfg_token_size_2); + reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_2_SHORT_CYCLE_MASK; + reg |= conf->short_cycle & + UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_2_SHORT_CYCLE_MASK; + al_reg_write32(&regs->cfg_token_size_2, reg); + + reg = al_reg_read32(&regs->mask); + reg &= ~0xf; /* only bits 0-3 defined */ + reg |= conf->mask & 0xf; + al_reg_write32(&regs->mask, reg); + + return 0; +} + +static int al_udma_common_rlimit_act(struct udma_rlimit_common *regs, + enum al_udma_m2s_rlimit_action act) +{ + uint32_t reg; + + switch (act) { + case AL_UDMA_STRM_RLIMIT_ENABLE: + reg = al_reg_read32(&regs->cfg_1s); + reg |= UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_EN; + al_reg_write32(&regs->cfg_1s, reg); + break; + case AL_UDMA_STRM_RLIMIT_PAUSE: + reg = al_reg_read32(&regs->cfg_1s); + reg |= UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_PAUSE; + al_reg_write32(&regs->cfg_1s, reg); + break; + case AL_UDMA_STRM_RLIMIT_RESET: + reg = al_reg_read32(&regs->sw_ctrl); + reg |= UDMA_M2S_STREAM_RATE_LIMITER_SW_CTRL_RST_TOKEN_CNT; + al_reg_write32(&regs->sw_ctrl, reg); + break; + default: + return -EINVAL; + } + return 0; +} + +/** Configure the M2S Stream rate limitation */ +int al_udma_m2s_strm_rlimit_set(struct al_udma *udma, + struct al_udma_m2s_rlimit_cfg *conf) +{ + struct udma_rlimit_common *rlimit_regs = + &udma->udma_regs->m2s.m2s_stream_rate_limiter.rlimit; + + return al_udma_common_rlimit_set(rlimit_regs, conf); +} + +int al_udma_m2s_strm_rlimit_act(struct al_udma *udma, + enum al_udma_m2s_rlimit_action act) +{ + struct udma_rlimit_common *rlimit_regs = + &udma->udma_regs->m2s.m2s_stream_rate_limiter.rlimit; + + if (al_udma_common_rlimit_act(rlimit_regs, act) == -EINVAL) { + al_err("udma [%s]: udma stream rate limit invalid action " + "(%d)\n", udma->name, act); + return -EINVAL; + } + return 0; +} + +/** Configure the M2S UDMA Q rate limitation */ +int al_udma_m2s_q_rlimit_set(struct al_udma_q *udma_q, + struct al_udma_m2s_rlimit_cfg *conf) +{ + struct udma_rlimit_common *rlimit_regs = &udma_q->q_regs->m2s_q.rlimit; + + return al_udma_common_rlimit_set(rlimit_regs, conf); +} + +int al_udma_m2s_q_rlimit_act(struct al_udma_q *udma_q, + enum al_udma_m2s_rlimit_action act) +{ + struct udma_rlimit_common *rlimit_regs = &udma_q->q_regs->m2s_q.rlimit; + + if (al_udma_common_rlimit_act(rlimit_regs, act) == -EINVAL) { + al_err("udma [%s %d]: udma stream rate limit invalid action " + "(%d)\n", + udma_q->udma->name, udma_q->qid, act); + return -EINVAL; + } + return 0; +} + +/** Configure the M2S UDMA Q scheduling mode */ +int al_udma_m2s_q_sc_set(struct al_udma_q *udma_q, + struct al_udma_m2s_q_dwrr_conf *conf) +{ + uint32_t reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_1); + + reg &= ~UDMA_M2S_Q_DWRR_CFG_1_MAX_DEFICIT_CNT_SIZE_MASK; + reg |= conf->max_deficit_cnt_sz & + UDMA_M2S_Q_DWRR_CFG_1_MAX_DEFICIT_CNT_SIZE_MASK; + if (conf->strict == AL_TRUE) + reg |= UDMA_M2S_Q_DWRR_CFG_1_STRICT; + else + reg &=
~UDMA_M2S_Q_DWRR_CFG_1_STRICT; + al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_1, reg); + + reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_2); + reg &= ~UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK; + reg |= (conf->axi_qos << UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_SHIFT) & + UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK; + reg &= ~UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK; + reg |= conf->q_qos & UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK; + al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_2, reg); + + reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_3); + reg &= ~UDMA_M2S_Q_DWRR_CFG_3_WEIGHT_MASK; + reg |= conf->weight & UDMA_M2S_Q_DWRR_CFG_3_WEIGHT_MASK; + al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_3, reg); + + return 0; +} + +int al_udma_m2s_q_sc_pause(struct al_udma_q *udma_q, al_bool set) +{ + uint32_t reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_1); + + if (set == AL_TRUE) + reg |= UDMA_M2S_Q_DWRR_CFG_1_PAUSE; + else + reg &= ~UDMA_M2S_Q_DWRR_CFG_1_PAUSE; + al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_1, reg); + + return 0; +} + +int al_udma_m2s_q_sc_reset(struct al_udma_q *udma_q) +{ + uint32_t reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_sw_ctrl); + + reg |= UDMA_M2S_Q_DWRR_SW_CTRL_RST_CNT; + al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_sw_ctrl, reg); + + return 0; +} + +/** M2S UDMA completion and application timeouts */ +int al_udma_m2s_comp_timeouts_set(struct al_udma *udma, + struct al_udma_m2s_comp_timeouts *conf) +{ + uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s_comp.cfg_1c); + + if (conf->sch_mode == SRR) + reg |= UDMA_M2S_COMP_CFG_1C_FORCE_RR; + else if (conf->sch_mode == STRICT) + reg &= ~UDMA_M2S_COMP_CFG_1C_FORCE_RR; + else { + al_err("udma [%s]: requested completion descriptor prefetch " + "arbiter mode (%d) is invalid\n", + udma->name, conf->sch_mode); + return -EINVAL; + } + if (conf->enable_q_promotion == AL_TRUE) + reg |= UDMA_M2S_COMP_CFG_1C_Q_PROMOTION; + else + reg &= ~UDMA_M2S_COMP_CFG_1C_Q_PROMOTION; + reg &= ~UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_MASK; + reg |= + conf->comp_fifo_depth << UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_SHIFT; + + reg &= ~UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_MASK; + reg |= conf->unack_fifo_depth + << UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_SHIFT; + al_reg_write32(&udma->udma_regs->m2s.m2s_comp.cfg_1c, reg); + + al_reg_write32(&udma->udma_regs->m2s.m2s_comp.cfg_coal + , conf->coal_timeout); + + reg = al_reg_read32(&udma->udma_regs->m2s.m2s_comp.cfg_application_ack); + reg &= ~UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_MASK; + reg |= conf->app_timeout << UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_SHIFT; + al_reg_write32(&udma->udma_regs->m2s.m2s_comp.cfg_application_ack, reg); + return 0; +} + +int al_udma_m2s_comp_timeouts_get(struct al_udma *udma, + struct al_udma_m2s_comp_timeouts *conf) +{ + uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s_comp.cfg_1c); + + if (reg & UDMA_M2S_COMP_CFG_1C_FORCE_RR) + conf->sch_mode = SRR; + else + conf->sch_mode = STRICT; + + if (reg & UDMA_M2S_COMP_CFG_1C_Q_PROMOTION) + conf->enable_q_promotion = AL_TRUE; + else + conf->enable_q_promotion = AL_FALSE; + + conf->comp_fifo_depth = + AL_REG_FIELD_GET(reg, + UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_MASK, + UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_SHIFT); + conf->unack_fifo_depth = + AL_REG_FIELD_GET(reg, + UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_MASK, + UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_SHIFT); + + conf->coal_timeout = al_reg_read32( + &udma->udma_regs->m2s.m2s_comp.cfg_coal); + + reg = al_reg_read32( + &udma->udma_regs->m2s.m2s_comp.cfg_application_ack); + + conf->app_timeout = +
AL_REG_FIELD_GET(reg, + UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_MASK, + UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_SHIFT); + + return 0; +} + +/** + * S2M UDMA configure no descriptors behaviour + */ +int al_udma_s2m_no_desc_cfg_set(struct al_udma *udma, al_bool drop_packet, al_bool gen_interrupt, uint32_t wait_for_desc_timeout) +{ + uint32_t reg; + + reg = al_reg_read32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2); + + if ((drop_packet == AL_TRUE) && (wait_for_desc_timeout == 0)) { + al_err("udma [%s]: setting timeout to 0 will cause the udma to wait forever instead of dropping the packet\n", udma->name); + return -EINVAL; + } + + if (drop_packet == AL_TRUE) + reg |= UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC; + else + reg &= ~UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC; + + if (gen_interrupt == AL_TRUE) + reg |= UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC; + else + reg &= ~UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC; + + AL_REG_FIELD_SET(reg, UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK, UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_SHIFT, wait_for_desc_timeout); + + al_reg_write32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2, reg); + + return 0; +} + +/* S2M UDMA configure a queue's completion update */ +int al_udma_s2m_q_compl_updade_config(struct al_udma_q *udma_q, al_bool enable) +{ + uint32_t reg = al_reg_read32(&udma_q->q_regs->s2m_q.comp_cfg); + + if (enable == AL_TRUE) + reg |= UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE; + else + reg &= ~UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE; + + al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg, reg); + + return 0; +} + +/* S2M UDMA configure a queue's completion descriptors coalescing */ +int al_udma_s2m_q_compl_coal_config(struct al_udma_q *udma_q, al_bool enable, uint32_t + coal_timeout) +{ + uint32_t reg = al_reg_read32(&udma_q->q_regs->s2m_q.comp_cfg); + + if (enable == AL_TRUE) + reg &= ~UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL; + else + reg |= UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL; + + al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg, reg); + + al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg_2, coal_timeout); + return 0; +} + +/* S2M UDMA configure completion descriptors write burst parameters */ +int al_udma_s2m_compl_desc_burst_config(struct al_udma *udma, uint16_t + burst_size) +{ + if ((burst_size != 64) && (burst_size != 128) && (burst_size != 256)) { + al_err("%s: invalid burst_size value (%d)\n", __func__, + burst_size); + return -EINVAL; + } + + /* convert burst size from bytes to beats (16 bytes) */ + burst_size = burst_size / 16; + al_reg_write32_masked(&udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1, + UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK | + UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK, + burst_size << UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT | + burst_size << UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT); + return 0; +} + +/* S2M UDMA per queue completion configuration */ +int al_udma_s2m_q_comp_set(struct al_udma_q *udma_q, + struct al_udma_s2m_q_comp_conf *conf) +{ + uint32_t reg = al_reg_read32(&udma_q->q_regs->s2m_q.comp_cfg); + if (conf->en_comp_ring_update == AL_TRUE) + reg |= UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE; + else + reg &= ~UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE; + + if (conf->dis_comp_coal == AL_TRUE) + reg |= UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL; + else + reg &= ~UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL; + + al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg, reg); + + al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg_2, conf->comp_timer); + + reg = al_reg_read32(&udma_q->q_regs->s2m_q.pkt_cfg); + + reg &= ~UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK; + reg |=
conf->hdr_split_size & UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK; + if (conf->force_hdr_split == AL_TRUE) + reg |= UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT; + else + reg &= ~UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT; + if (conf->en_hdr_split == AL_TRUE) + reg |= UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT; + else + reg &= ~UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT; + + al_reg_write32(&udma_q->q_regs->s2m_q.pkt_cfg, reg); + + reg = al_reg_read32(&udma_q->q_regs->s2m_q.qos_cfg); + reg &= ~UDMA_S2M_QOS_CFG_Q_QOS_MASK; + reg |= conf->q_qos & UDMA_S2M_QOS_CFG_Q_QOS_MASK; + al_reg_write32(&udma_q->q_regs->s2m_q.qos_cfg, reg); + + return 0; +} + +/* UDMA VMID control configuration */ +void al_udma_gen_vmid_conf_set( + struct unit_regs *unit_regs, + struct al_udma_gen_vmid_conf *conf) +{ + al_reg_write32_masked( + &unit_regs->gen.vmid.cfg_vmid_0, + UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_MASK | + UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_MASK | + UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_MASK | + UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_MASK, + (((conf->tx_q_conf[0].desc_en << 0) | + (conf->tx_q_conf[1].desc_en << 1) | + (conf->tx_q_conf[2].desc_en << 2) | + (conf->tx_q_conf[3].desc_en << 3)) << + UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_SHIFT) | + (((conf->tx_q_conf[0].queue_en << 0) | + (conf->tx_q_conf[1].queue_en << 1) | + (conf->tx_q_conf[2].queue_en << 2) | + (conf->tx_q_conf[3].queue_en << 3)) << + UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_SHIFT) | + (((conf->rx_q_conf[0].desc_en << 0) | + (conf->rx_q_conf[1].desc_en << 1) | + (conf->rx_q_conf[2].desc_en << 2) | + (conf->rx_q_conf[3].desc_en << 3)) << + UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_SHIFT) | + (((conf->rx_q_conf[0].queue_en << 0) | + (conf->rx_q_conf[1].queue_en << 1) | + (conf->rx_q_conf[2].queue_en << 2) | + (conf->rx_q_conf[3].queue_en << 3)) << + UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_SHIFT)); + + al_reg_write32( + &unit_regs->gen.vmid.cfg_vmid_1, + (conf->tx_q_conf[0].vmid << + UDMA_GEN_VMID_CFG_VMID_1_TX_Q_0_VMID_SHIFT) | + (conf->tx_q_conf[1].vmid << + UDMA_GEN_VMID_CFG_VMID_1_TX_Q_1_VMID_SHIFT)); + + al_reg_write32( + &unit_regs->gen.vmid.cfg_vmid_2, + (conf->tx_q_conf[2].vmid << + UDMA_GEN_VMID_CFG_VMID_2_TX_Q_2_VMID_SHIFT) | + (conf->tx_q_conf[3].vmid << + UDMA_GEN_VMID_CFG_VMID_2_TX_Q_3_VMID_SHIFT)); + + al_reg_write32( + &unit_regs->gen.vmid.cfg_vmid_3, + (conf->rx_q_conf[0].vmid << + UDMA_GEN_VMID_CFG_VMID_3_RX_Q_0_VMID_SHIFT) | + (conf->rx_q_conf[1].vmid << + UDMA_GEN_VMID_CFG_VMID_3_RX_Q_1_VMID_SHIFT)); + + al_reg_write32( + &unit_regs->gen.vmid.cfg_vmid_4, + (conf->rx_q_conf[2].vmid << + UDMA_GEN_VMID_CFG_VMID_4_RX_Q_2_VMID_SHIFT) | + (conf->rx_q_conf[3].vmid << + UDMA_GEN_VMID_CFG_VMID_4_RX_Q_3_VMID_SHIFT)); +} + +/* UDMA VMID MSIX control configuration */ +void al_udma_gen_vmid_msix_conf_set( + struct unit_regs *unit_regs, + struct al_udma_gen_vmid_msix_conf *conf) +{ + al_reg_write32_masked( + &unit_regs->gen.vmid.cfg_vmid_0, + UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_ACCESS_EN | + UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_SEL, + (conf->access_en ? UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_ACCESS_EN : 0) | + (conf->sel ? 
UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_SEL : 0)); +} + +/* UDMA VMID control advanced Tx queue configuration */ +void al_udma_gen_vmid_advanced_tx_q_conf( + struct al_udma_q *q, + struct al_udma_gen_vmid_advanced_tx_q_conf *conf) +{ + struct unit_regs *unit_regs = (struct unit_regs *)q->udma->udma_regs; + struct udma_gen_vmpr *vmpr = &unit_regs->gen.vmpr[q->qid]; + + al_reg_write32_masked( + &vmpr->cfg_vmpr_0, + UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_HISEL_MASK | + UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_DATA_VMID_EN | + UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_PREF_VMID_EN | + UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_CMPL_VMID_EN, + conf->tx_q_addr_hi_sel | + ((conf->tx_q_data_vmid_en == AL_TRUE) ? + UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_DATA_VMID_EN : 0) | + ((conf->tx_q_prefetch_vmid_en == AL_TRUE) ? + UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_PREF_VMID_EN : 0) | + ((conf->tx_q_compl_vmid_en == AL_TRUE) ? + UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_CMPL_VMID_EN : 0)); + + al_reg_write32( + &vmpr->cfg_vmpr_1, + conf->tx_q_addr_hi); + + al_reg_write32_masked( + &vmpr->cfg_vmpr_2, + UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_MASK | + UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_MASK, + (conf->tx_q_prefetch_vmid << + UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_SHIFT) | + (conf->tx_q_compl_vmid << + UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_SHIFT)); + + al_reg_write32_masked( + &vmpr->cfg_vmpr_3, + UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_MASK | + UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_MASK, + (conf->tx_q_data_vmid << + UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SHIFT) | + (conf->tx_q_data_vmid_mask << + UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_SHIFT)); +} + +/** UDMA VMID control advanced Rx queue configuration */ +void al_udma_gen_vmid_advanced_rx_q_conf( + struct al_udma_q *q, + struct al_udma_gen_vmid_advanced_rx_q_conf *conf) +{ + struct unit_regs *unit_regs = (struct unit_regs *)q->udma->udma_regs; + struct udma_gen_vmpr *vmpr = &unit_regs->gen.vmpr[q->qid]; + + al_reg_write32_masked( + &vmpr->cfg_vmpr_4, + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_MASK | + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_VMID_EN | + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_MASK | + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_VMID_EN | + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_MASK | + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_VMID_EN | + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_PREF_VMID_EN | + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_CMPL_VMID_EN, + (conf->rx_q_addr_hi_sel << + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_SHIFT) | + ((conf->rx_q_data_vmid_en == AL_TRUE) ? + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_VMID_EN : 0) | + (conf->rx_q_data_buff2_addr_hi_sel << + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_SHIFT) | + ((conf->rx_q_data_buff2_vmid_en == AL_TRUE) ? + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_VMID_EN : 0) | + (conf->rx_q_ddp_addr_hi_sel << + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_SHIFT) | + ((conf->rx_q_ddp_vmid_en == AL_TRUE) ? + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_VMID_EN : 0) | + ((conf->rx_q_prefetch_vmid_en == AL_TRUE) ? + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_PREF_VMID_EN : 0) | + ((conf->rx_q_compl_vmid_en == AL_TRUE) ? 
+ UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_CMPL_VMID_EN : 0)); + + al_reg_write32_masked( + &vmpr->cfg_vmpr_6, + UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_MASK | + UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_MASK, + (conf->rx_q_prefetch_vmid << + UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_SHIFT) | + (conf->rx_q_compl_vmid << + UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_SHIFT)); + + al_reg_write32_masked( + &vmpr->cfg_vmpr_7, + UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_MASK | + UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_MASK, + (conf->rx_q_data_vmid << + UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SHIFT) | + (conf->rx_q_data_vmid_mask << + UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_SHIFT)); + + al_reg_write32_masked( + &vmpr->cfg_vmpr_8, + UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_MASK | + UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_MASK, + (conf->rx_q_data_vmid << + UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SHIFT) | + (conf->rx_q_data_vmid_mask << + UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_SHIFT)); + + al_reg_write32_masked( + &vmpr->cfg_vmpr_9, + UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_MASK | + UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_MASK, + (conf->rx_q_data_vmid << + UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SHIFT) | + (conf->rx_q_data_vmid_mask << + UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_SHIFT)); + + al_reg_write32( + &vmpr->cfg_vmpr_10, + conf->rx_q_addr_hi); + + al_reg_write32( + &vmpr->cfg_vmpr_11, + conf->rx_q_data_buff2_addr_hi); + + al_reg_write32( + &vmpr->cfg_vmpr_12, + conf->rx_q_ddp_addr_hi); +} + +/* UDMA header split buffer 2 Rx queue configuration */ +void al_udma_gen_hdr_split_buff2_rx_q_conf( + struct al_udma_q *q, + struct al_udma_gen_hdr_split_buff2_q_conf *conf) +{ + struct unit_regs *unit_regs = (struct unit_regs *)q->udma->udma_regs; + struct udma_gen_vmpr *vmpr = &unit_regs->gen.vmpr[q->qid]; + + al_reg_write32_masked( + &vmpr->cfg_vmpr_4, + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_MASK, + conf->add_msb_sel << + UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_SHIFT); + + al_reg_write32( + &vmpr->cfg_vmpr_5, + conf->addr_msb); +} + diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_debug.c b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_debug.c new file mode 100644 index 00000000000000..37c52e3aaf1d38 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_debug.c @@ -0,0 +1,496 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @file al_hal_udma_debug.c
+ *
+ * @brief Universal DMA HAL driver for debug
+ *
+ */
+
+#define DEBUG
+
+#include <al_hal_udma.h>
+#include <al_hal_udma_regs.h>
+#include <al_hal_udma_debug.h>
+
+static void al_udma_regs_m2s_axi_print(struct al_udma *udma)
+{
+	al_dbg("M2S AXI regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, comp_wr_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, comp_wr_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, data_rd_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, data_rd_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_rd_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_rd_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, data_rd_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_rd_cfg_3);
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_wr_cfg_1);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, axi_m2s,
+			desc_wr_cfg_1,
+			max_axi_beats,
+			UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, axi_m2s,
+			desc_wr_cfg_1,
+			min_axi_beats,
+			UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS);
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, ostand_cfg);
+}
+
+static void al_udma_regs_m2s_general_print(struct al_udma *udma)
+{
+	al_dbg("M2S general regs:\n");
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, state);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
+			comp_ctrl,
+			UDMA_M2S_STATE_COMP_CTRL);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
+			stream_if,
+			UDMA_M2S_STATE_STREAM_IF);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
+			rd_ctrl,
+			UDMA_M2S_STATE_DATA_RD_CTRL);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
+			desc_pref,
+			UDMA_M2S_STATE_DESC_PREF);
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, err_log_mask);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_0);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, data_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, header_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, unack_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, check_en);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, fifo_en);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, cfg_len);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, stream_cfg);
+}
+
+static void al_udma_regs_m2s_rd_print(struct al_udma *udma)
+{
+	al_dbg("M2S read regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, desc_pref_cfg_1);
+	
AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, desc_pref_cfg_2); + + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, desc_pref_cfg_3); + AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_rd, + desc_pref_cfg_3, + min_burst_below_thr, + UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR); + AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_rd, + desc_pref_cfg_3, + min_burst_above_thr, + UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR); + AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_rd, + desc_pref_cfg_3, + pref_thr, + UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR); + + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, data_cfg); +} + +static void al_udma_regs_m2s_dwrr_print(struct al_udma *udma) +{ + al_dbg("M2S DWRR regs:\n"); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_dwrr, cfg_sched); +} + +static void al_udma_regs_m2s_rate_limiter_print(struct al_udma *udma) +{ + al_dbg("M2S rate limiter regs:\n"); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rate_limiter, gen_cfg); +} + +static void al_udma_regs_m2s_stream_rate_limiter_print(struct al_udma *udma) +{ + al_dbg("M2S stream rate limiter regs:\n"); + + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter, + rlimit.cfg_1s); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter, + rlimit.cfg_cycle); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter, + rlimit.cfg_token_size_1); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter, + rlimit.cfg_token_size_2); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter, + rlimit.mask); +} + +static void al_udma_regs_m2s_comp_print(struct al_udma *udma) +{ + al_dbg("M2S completion regs:\n"); + + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_comp, cfg_1c); + + AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_comp, cfg_1c, + comp_fifo_depth, + UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH); + AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_comp, cfg_1c, + unack_fifo_depth, + UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH); + AL_UDMA_PRINT_REG_BIT(udma, " ", "\n", m2s, m2s_comp, cfg_1c, + q_promotion, + UDMA_M2S_COMP_CFG_1C_Q_PROMOTION); + AL_UDMA_PRINT_REG_BIT(udma, " ", "\n", m2s, m2s_comp, cfg_1c, + force_rr, + UDMA_M2S_COMP_CFG_1C_FORCE_RR); + AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_comp, cfg_1c, + q_free_min, + UDMA_M2S_COMP_CFG_1C_Q_FREE_MIN); + + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_comp, cfg_coal); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_comp, cfg_application_ack); +} + +static void al_udma_regs_m2s_stat_print(struct al_udma *udma) +{ + al_dbg("M2S statistics regs:\n"); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, cfg_st); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, tx_pkt); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, tx_bytes_low); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, tx_bytes_high); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, prefed_desc); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, comp_pkt); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, comp_desc); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, ack_pkts); +} + +static void al_udma_regs_m2s_feature_print(struct al_udma *udma) +{ + al_dbg("M2S feature regs:\n"); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_1); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_3); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_4); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_5); +} + +static void al_udma_regs_m2s_q_print(struct al_udma *udma, uint32_t 
qid) +{ + al_dbg("M2S Q[%d] status regs:\n", qid); + al_reg_write32(&udma->udma_regs->m2s.m2s.indirect_ctrl, qid); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_pref_fifo_status); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_comp_fifo_status); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_rate_limit_status); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_dwrr_status); + + al_dbg("M2S Q[%d] regs:\n", qid); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], cfg); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], status); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrbp_low); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrbp_high); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrl); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrhp); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrtp); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdcp); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tcrbp_low); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tcrbp_high); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tcrhp); + + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], rlimit.cfg_1s); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], rlimit.cfg_cycle); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], + rlimit.cfg_token_size_1); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], + rlimit.cfg_token_size_2); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], rlimit.mask); + + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], dwrr_cfg_1); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], dwrr_cfg_2); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], dwrr_cfg_3); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], comp_cfg); + AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], q_tx_pkt); +} + +static void al_udma_regs_s2m_axi_print(struct al_udma *udma) +{ + al_dbg("S2M AXI regs:\n"); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, data_wr_cfg_1); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, data_wr_cfg_2); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_rd_cfg_4); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_rd_cfg_5); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, comp_wr_cfg_1); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, comp_wr_cfg_2); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, data_wr_cfg); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_rd_cfg_3); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_wr_cfg_1); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, ostand_cfg_rd); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, ostand_cfg_wr); +} + +static void al_udma_regs_s2m_general_print(struct al_udma *udma) +{ + al_dbg("S2M general regs:\n"); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, state); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, err_log_mask); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_0); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_1); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_2); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_3); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, s_data_fifo_status); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, s_header_fifo_status); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, axi_data_fifo_status); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, unack_fifo_status); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, check_en); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, fifo_en); + AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, 
stream_cfg);
+}
+
+static void al_udma_regs_s2m_rd_print(struct al_udma *udma)
+{
+	al_dbg("S2M read regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_4);
+}
+
+static void al_udma_regs_s2m_wr_print(struct al_udma *udma)
+{
+	al_dbg("S2M write regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_wr, data_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_wr, data_cfg_2);
+}
+
+static void al_udma_regs_s2m_comp_print(struct al_udma *udma)
+{
+	al_dbg("S2M completion regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_comp, cfg_1c);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_comp, cfg_2c);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_comp, cfg_application_ack);
+}
+
+static void al_udma_regs_s2m_stat_print(struct al_udma *udma)
+{
+	al_dbg("S2M statistics regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, drop_pkt);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, rx_bytes_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, rx_bytes_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, prefed_desc);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, comp_pkt);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, comp_desc);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, ack_pkts);
+}
+
+static void al_udma_regs_s2m_feature_print(struct al_udma *udma)
+{
+	al_dbg("S2M feature regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_4);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_5);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_6);
+}
+
+static void al_udma_regs_s2m_q_print(struct al_udma *udma, uint32_t qid)
+{
+	al_dbg("S2M Q[%d] status regs:\n", qid);
+	al_reg_write32(&udma->udma_regs->s2m.s2m.indirect_ctrl, qid);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, sel_pref_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, sel_comp_fifo_status);
+
+	al_dbg("S2M Q[%d] regs:\n", qid);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrbp_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrbp_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrl);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrhp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrtp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdcp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrbp_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrbp_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrhp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrhp_internal);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], comp_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], comp_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], pkt_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], qos_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], q_rx_pkt);
+}
+
+void al_udma_regs_print(struct al_udma *udma, unsigned int mask)
+{
+	uint32_t i;
+
+	if (!udma)
+		return;
+
+	if (udma->type == UDMA_TX) {
+		if (mask & AL_UDMA_DEBUG_AXI)
+			
al_udma_regs_m2s_axi_print(udma); + if (mask & AL_UDMA_DEBUG_GENERAL) + al_udma_regs_m2s_general_print(udma); + if (mask & AL_UDMA_DEBUG_READ) + al_udma_regs_m2s_rd_print(udma); + if (mask & AL_UDMA_DEBUG_DWRR) + al_udma_regs_m2s_dwrr_print(udma); + if (mask & AL_UDMA_DEBUG_RATE_LIMITER) + al_udma_regs_m2s_rate_limiter_print(udma); + if (mask & AL_UDMA_DEBUG_STREAM_RATE_LIMITER) + al_udma_regs_m2s_stream_rate_limiter_print(udma); + if (mask & AL_UDMA_DEBUG_COMP) + al_udma_regs_m2s_comp_print(udma); + if (mask & AL_UDMA_DEBUG_STAT) + al_udma_regs_m2s_stat_print(udma); + if (mask & AL_UDMA_DEBUG_FEATURE) + al_udma_regs_m2s_feature_print(udma); + for (i = 0; i < DMA_MAX_Q; i++) { + if (mask & AL_UDMA_DEBUG_QUEUE(i)) + al_udma_regs_m2s_q_print(udma, i); + } + } else { + if (mask & AL_UDMA_DEBUG_AXI) + al_udma_regs_s2m_axi_print(udma); + if (mask & AL_UDMA_DEBUG_GENERAL) + al_udma_regs_s2m_general_print(udma); + if (mask & AL_UDMA_DEBUG_READ) + al_udma_regs_s2m_rd_print(udma); + if (mask & AL_UDMA_DEBUG_WRITE) + al_udma_regs_s2m_wr_print(udma); + if (mask & AL_UDMA_DEBUG_COMP) + al_udma_regs_s2m_comp_print(udma); + if (mask & AL_UDMA_DEBUG_STAT) + al_udma_regs_s2m_stat_print(udma); + if (mask & AL_UDMA_DEBUG_FEATURE) + al_udma_regs_s2m_feature_print(udma); + for (i = 0; i < DMA_MAX_Q; i++) { + if (mask & AL_UDMA_DEBUG_QUEUE(i)) + al_udma_regs_s2m_q_print(udma, i); + } + } +} + +void al_udma_q_struct_print(struct al_udma *udma, uint32_t qid) +{ + struct al_udma_q *queue; + + if (!udma) + return; + + if (qid >= DMA_MAX_Q) + return; + + queue = &udma->udma_q[qid]; + + al_dbg("Q[%d] struct:\n", qid); + al_dbg(" size_mask = 0x%08x\n", (uint32_t)queue->size_mask); + al_dbg(" q_regs = %p\n", queue->q_regs); + al_dbg(" desc_base_ptr = %p\n", queue->desc_base_ptr); + al_dbg(" next_desc_idx = %d\n", (uint16_t)queue->next_desc_idx); + al_dbg(" desc_ring_id = %d\n", (uint32_t)queue->desc_ring_id); + al_dbg(" cdesc_base_ptr = %p\n", queue->cdesc_base_ptr); + al_dbg(" cdesc_size = %d\n", (uint32_t)queue->cdesc_size); + al_dbg(" next_cdesc_idx = %d\n", (uint16_t)queue->next_cdesc_idx); + al_dbg(" end_cdesc_ptr = %p\n", queue->end_cdesc_ptr); + al_dbg(" comp_head_idx = %d\n", (uint16_t)queue->comp_head_idx); + al_dbg(" comp_head_ptr = %p\n", queue->comp_head_ptr); + al_dbg(" pkt_crnt_descs = %d\n", (uint32_t)queue->pkt_crnt_descs); + al_dbg(" comp_ring_id = %d\n", (uint32_t)queue->comp_ring_id); + al_dbg(" desc_phy_base = 0x%016llx\n", (uint64_t)queue->desc_phy_base); + al_dbg(" cdesc_phy_base = 0x%016llx\n", + (uint64_t)queue->cdesc_phy_base); + al_dbg(" flags = 0x%08x\n", (uint32_t)queue->flags); + al_dbg(" size = %d\n", (uint32_t)queue->size); + al_dbg(" status = %d\n", (uint32_t)queue->status); + al_dbg(" udma = %p\n", queue->udma); + al_dbg(" qid = %d\n", (uint32_t)queue->qid); +} + +void al_udma_ring_print(struct al_udma *udma, uint32_t qid, + enum al_udma_ring_type rtype) +{ + struct al_udma_q *queue; + uint32_t desc_size; + void *base_ptr; + uint32_t i; + + if (!udma) + return; + + if (qid >= DMA_MAX_Q) + return; + + queue = &udma->udma_q[qid]; + if (rtype == AL_RING_SUBMISSION) { + base_ptr = queue->desc_base_ptr; + desc_size = sizeof(union al_udma_desc); + if (base_ptr) + al_dbg("Q[%d] submission ring pointers:\n", qid); + else { + al_dbg("Q[%d] submission ring is not allocated\n", qid); + return; + } + } else { + base_ptr = queue->cdesc_base_ptr; + desc_size = queue->cdesc_size; + if (base_ptr) + al_dbg("Q[%d] completion ring pointers:\n", qid); + else { + al_dbg("Q[%d] completion ring is not 
allocated\n", qid); + return; + } + } + + for (i = 0; i < queue->size; i++) { + uint32_t *curr_addr = base_ptr + i * desc_size; + if (desc_size == 16) + al_dbg("[%04d](%p): %08x %08x %08x %08x\n", + i, + curr_addr, + (uint32_t)*curr_addr, + (uint32_t)*(curr_addr+1), + (uint32_t)*(curr_addr+2), + (uint32_t)*(curr_addr+3)); + else if (desc_size == 8) + al_dbg("[%04d](%p): %08x %08x\n", + i, + curr_addr, + (uint32_t)*curr_addr, + (uint32_t)*(curr_addr+1)); + else if (desc_size == 4) + al_dbg("[%04d](%p): %08x\n", + i, + curr_addr, + (uint32_t)*curr_addr); + else + break; + } +} diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_fast.c b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_fast.c new file mode 100644 index 00000000000000..5320ca6d3c1a1b --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_fast.c @@ -0,0 +1,77 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+*******************************************************************************/
+
+#include <al_hal_udma_fast.h>
+
+int al_udma_fast_memcpy_q_prepare(struct al_udma_q *udma_txq,
+		struct al_udma_q *udma_rxq,
+		struct al_memcpy_transaction *xaction)
+{
+	union al_udma_desc *desc;
+	uint32_t i;
+	uint32_t attr = 0;
+	uint32_t tx_flags = 0;
+	uint32_t rx_flags = 0;
+
+	attr |= AL_CRC_CHECKSUM << TX_DESC_META_OP_SHIFT;
+	attr |= AL_CRC_CHECKSUM_NULL << TX_DESC_META_CRC_OP_TYPE_SHIFT;
+	attr |= TX_DESC_META_CRC_SEND_ORIG;
+	attr |= RX_DESC_META_CRC_FIRST_BUF;
+	attr |= RX_DESC_META_CRC_LAST_BUF;
+
+	if (xaction->flags & AL_SSM_BARRIER)
+		tx_flags |= AL_M2S_DESC_DMB;
+	if (xaction->flags & AL_SSM_SRC_NO_SNOOP)
+		tx_flags |= AL_M2S_DESC_NO_SNOOP_H;
+	tx_flags |= AL_M2S_DESC_FIRST | AL_M2S_DESC_LAST;
+
+	if (xaction->flags & AL_SSM_INTERRUPT)
+		rx_flags |= AL_M2S_DESC_INT_EN;
+	if (xaction->flags & AL_SSM_DEST_NO_SNOOP)
+		rx_flags |= AL_M2S_DESC_NO_SNOOP_H;
+
+	for (i = 0; i < udma_txq->size; i++) {
+		desc = udma_txq->desc_base_ptr + i;
+		desc->tx.meta_ctrl = swap32_to_le(attr);
+		desc->tx.len_ctrl = swap32_to_le(tx_flags);
+	}
+
+	for (i = 0; i < udma_rxq->size; i++) {
+		desc = udma_rxq->desc_base_ptr + i;
+		desc->rx.len_ctrl = swap32_to_le(rx_flags);
+	}
+
+	return 0;
+}
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_iofic.c b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_iofic.c
new file mode 100644
index 00000000000000..6351d537232bdc
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_iofic.c
@@ -0,0 +1,150 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_udma_iofic.c
+ *
+ * @brief unit interrupts configurations
+ *
+ */
+
+#include <al_hal_udma_iofic.h>
+#include <al_hal_udma_regs.h>
+
+/*
+ * configure the interrupt registers; interrupts are kept masked
+ */
+static int al_udma_main_iofic_config(struct al_iofic_regs __iomem *base,
+		enum al_iofic_mode mode)
+{
+	switch (mode) {
+	case AL_IOFIC_MODE_LEGACY:
+		al_iofic_config(base, AL_INT_GROUP_A,
+			INT_CONTROL_GRP_SET_ON_POSEDGE |
+			INT_CONTROL_GRP_MASK_MSI_X |
+			INT_CONTROL_GRP_CLEAR_ON_READ);
+		al_iofic_config(base, AL_INT_GROUP_B,
+			INT_CONTROL_GRP_CLEAR_ON_READ |
+			INT_CONTROL_GRP_MASK_MSI_X);
+		al_iofic_config(base, AL_INT_GROUP_C,
+			INT_CONTROL_GRP_CLEAR_ON_READ |
+			INT_CONTROL_GRP_MASK_MSI_X);
+		al_iofic_config(base, AL_INT_GROUP_D,
+			INT_CONTROL_GRP_SET_ON_POSEDGE |
+			INT_CONTROL_GRP_MASK_MSI_X |
+			INT_CONTROL_GRP_CLEAR_ON_READ);
+		break;
+	case AL_IOFIC_MODE_MSIX_PER_Q:
+		al_iofic_config(base, AL_INT_GROUP_A,
+			INT_CONTROL_GRP_SET_ON_POSEDGE |
+			INT_CONTROL_GRP_AUTO_MASK |
+			INT_CONTROL_GRP_AUTO_CLEAR);
+		al_iofic_config(base, AL_INT_GROUP_B,
+			INT_CONTROL_GRP_AUTO_CLEAR |
+			INT_CONTROL_GRP_AUTO_MASK |
+			INT_CONTROL_GRP_CLEAR_ON_READ);
+		al_iofic_config(base, AL_INT_GROUP_C,
+			INT_CONTROL_GRP_AUTO_CLEAR |
+			INT_CONTROL_GRP_AUTO_MASK |
+			INT_CONTROL_GRP_CLEAR_ON_READ);
+		al_iofic_config(base, AL_INT_GROUP_D,
+			INT_CONTROL_GRP_SET_ON_POSEDGE |
+			INT_CONTROL_GRP_CLEAR_ON_READ |
+			INT_CONTROL_GRP_MASK_MSI_X);
+		break;
+	case AL_IOFIC_MODE_MSIX_PER_GROUP:
+		al_iofic_config(base, AL_INT_GROUP_A,
+			INT_CONTROL_GRP_SET_ON_POSEDGE |
+			INT_CONTROL_GRP_AUTO_CLEAR |
+			INT_CONTROL_GRP_AUTO_MASK);
+		al_iofic_config(base, AL_INT_GROUP_B,
+			INT_CONTROL_GRP_CLEAR_ON_READ |
+			INT_CONTROL_GRP_MASK_MSI_X);
+		al_iofic_config(base, AL_INT_GROUP_C,
+			INT_CONTROL_GRP_CLEAR_ON_READ |
+			INT_CONTROL_GRP_MASK_MSI_X);
+		al_iofic_config(base, AL_INT_GROUP_D,
+			INT_CONTROL_GRP_SET_ON_POSEDGE |
+			INT_CONTROL_GRP_CLEAR_ON_READ |
+			INT_CONTROL_GRP_MASK_MSI_X);
+		break;
+	default:
+		al_err("%s: invalid mode (%d)\n", __func__, mode);
+		return -EINVAL;
+	}
+
+	al_dbg("%s: base.%p mode %d\n", __func__, base, mode);
+	return 0;
+}
+
+/*
+ * configure the UDMA interrupt registers; interrupts are kept masked
+ */
+int al_udma_iofic_config(struct unit_regs __iomem *regs, enum al_iofic_mode mode,
+		uint32_t m2s_errors_disable,
+		uint32_t m2s_aborts_disable,
+		uint32_t s2m_errors_disable,
+		uint32_t s2m_aborts_disable)
+{
+	int rc;
+
+	rc = al_udma_main_iofic_config(&regs->gen.interrupt_regs.main_iofic, mode);
+	if (rc != 0)
+		return rc;
+
+	al_iofic_unmask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_A, ~m2s_errors_disable);
+	al_iofic_abort_mask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_A, m2s_aborts_disable);
+
+	al_iofic_unmask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_B, ~s2m_errors_disable);
+	al_iofic_abort_mask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_B, s2m_aborts_disable);
+
+	al_dbg("%s base.%p mode %d\n", __func__, regs, mode);
+	return 0;
+}
+
+/*
+ * return the offset of the unmask register for a given group
+ */
+uint32_t __iomem * al_udma_iofic_unmask_offset_get(
+	struct unit_regs __iomem *regs,
+	enum al_udma_iofic_level level,
+	int group)
+{
+	al_assert(al_udma_iofic_level_and_group_valid(level, group));
+	return al_iofic_unmask_offset_get(al_udma_iofic_reg_base_get(regs, level), group);
+}
+
+/** @} end of UDMA group */
diff --git 
a/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_main.c b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_main.c new file mode 100644 index 00000000000000..bad58714e9d9e4 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_hal_udma_main.c @@ -0,0 +1,730 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_udma_main.c
+ *
+ * @brief Universal DMA HAL driver for main functions (initialization, data path)
+ *
+ */
+
+#include <al_hal_udma.h>
+
+#define AL_UDMA_Q_RST_TOUT	10000	/* Queue reset timeout [uSecs] */
+
+#define UDMA_STATE_IDLE		0x0
+#define UDMA_STATE_NORMAL	0x1
+#define UDMA_STATE_ABORT	0x2
+#define UDMA_STATE_RESERVED	0x3
+
+const char *const al_udma_states_name[] = {
+	"Disable",
+	"Idle",
+	"Normal",
+	"Abort",
+	"Reset"
+};
+
+#define AL_UDMA_INITIAL_RING_ID	1
+
+/* dma_q flags */
+#define AL_UDMA_Q_FLAGS_IGNORE_RING_ID	AL_BIT(0)
+#define AL_UDMA_Q_FLAGS_NO_COMP_UPDATE	AL_BIT(1)
+#define AL_UDMA_Q_FLAGS_EN_COMP_COAL	AL_BIT(2)
+
+
+static void al_udma_set_defaults(struct al_udma *udma)
+{
+	uint32_t tmp;
+
+#if 0
+	uint32_t reg, reg2;
+#endif
+
+	if (udma->type == UDMA_TX) {
+		struct unit_regs *tmp_unit_regs =
+			(struct unit_regs *)udma->udma_regs;
+
+		tmp = al_reg_read32(
+			&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3);
+		tmp &= ~UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK;
+		tmp |= 16 << UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT;
+		al_reg_write32(&tmp_unit_regs->m2s.m2s_rd.desc_pref_cfg_3,
+			tmp);
+		al_reg_write32(&tmp_unit_regs->gen.axi.cfg_1, 0);
+		al_reg_write32(&tmp_unit_regs->m2s.m2s_comp.cfg_application_ack,
+			0); /* Ack time out */
+
+		/* TODO: remove this code in the future */
+		tmp = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1);
+		tmp &= ~UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
+		tmp |= 4 << UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT;
+		al_reg_write32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1,
+			tmp);
+
+	}
+	if (udma->type == UDMA_RX) {
+		al_reg_write32(
+			&udma->udma_regs->s2m.s2m_comp.cfg_application_ack, 0);
+			/* Ack time out */
+
+	}
+#if 0
+	if (udma->type == UDMA_TX) {
+		reg = al_reg_read32(&udma->udma_regs->m2s.m2s_feature.reg_1);
+		al_reg_write32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_1, reg);
+
+		reg = al_reg_read32(&udma->udma_regs->m2s.m2s_feature.reg_3);
+		al_reg_write32(&udma->udma_regs->m2s.m2s_rd.data_cfg, reg);
+
+		reg = al_reg_read32(&udma->udma_regs->m2s.m2s_feature.reg_4);
+		tmp = al_reg_read32(&udma->udma_regs->m2s.m2s_comp.cfg_1c);
+		tmp &= ~0x1FFFF;
+		tmp |= reg & 0x1FFFF;
+		al_reg_write32(&udma->udma_regs->m2s.m2s_comp.cfg_1c, tmp);
+
+		reg = al_reg_read32(&udma->udma_regs->m2s.m2s_feature.reg_5);
+		al_reg_write32(&udma->udma_regs->m2s.axi_m2s.ostand_cfg, reg);
+
+		/* Set AXI defaults */
+
+		reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.comp_wr_cfg_1);
+		reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_MASK;
+		reg |= 0x1 << UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_SHIFT;
+		al_reg_write32(&udma->udma_regs->m2s.axi_m2s.comp_wr_cfg_1, reg);
+
+		reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.data_rd_cfg_1);
+		reg &= ~UDMA_AXI_M2S_DATA_RD_CFG_1_ARBURST_MASK;
+		reg |= 0x1 << UDMA_AXI_M2S_DATA_RD_CFG_1_ARBURST_SHIFT;
+		al_reg_write32(&udma->udma_regs->m2s.axi_m2s.data_rd_cfg_1, reg);
+
+		reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_1);
+		reg &= ~UDMA_AXI_M2S_DESC_RD_CFG_1_ARBURST_MASK;
+		reg |= 0x1 << UDMA_AXI_M2S_DESC_RD_CFG_1_ARBURST_SHIFT;
+		al_reg_write32(&udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_1, reg);
+
+	} else {
+		reg = al_reg_read32(&udma->udma_regs->s2m.s2m_feature.reg_1);
+		al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_1, reg);
+
+		reg = al_reg_read32(&udma->udma_regs->s2m.s2m_feature.reg_3);
+		al_reg_write32(&udma->udma_regs->s2m.s2m_wr.data_cfg_1, reg);
+
+		reg = al_reg_read32(&udma->udma_regs->s2m.s2m_feature.reg_4);
+		al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_2c, reg);
+
+		reg = al_reg_read32(&udma->udma_regs->s2m.s2m_feature.reg_5);
+		tmp = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_rd);
+		tmp &= ~UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_MASK;
+		tmp |= AL_REG_FIELD_GET(reg,
+			UDMA_S2M_FEATURE_REG_5_MAX_DESC_RD_OSTAND_MASK,
+			UDMA_S2M_FEATURE_REG_5_MAX_DESC_RD_OSTAND_SHIFT);
+		al_reg_write32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_rd, tmp);
+
+		reg2 = al_reg_read32(&udma->udma_regs->s2m.s2m_feature.reg_6);
+
+		tmp = AL_REG_FIELD_GET(reg,
+			UDMA_S2M_FEATURE_REG_5_MAX_DATA_WR_OSTAND_MASK,
+			UDMA_S2M_FEATURE_REG_5_MAX_DATA_WR_OSTAND_SHIFT);
+
+		tmp |= AL_REG_FIELD_GET(reg2,
+			UDMA_S2M_FEATURE_REG_6_MAX_DATA_BEATS_WR_OSTAND_MASK,
+			UDMA_S2M_FEATURE_REG_6_MAX_DATA_BEATS_WR_OSTAND_SHIFT)
+			<< UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_DATA_WR_SHIFT;
+
+		tmp |= AL_REG_FIELD_GET(reg2,
+			UDMA_S2M_FEATURE_REG_5_MAX_COMP_REQ_MASK,
+			UDMA_S2M_FEATURE_REG_5_MAX_COMP_REQ_SHIFT)
+			<< UDMA_S2M_FEATURE_REG_5_MAX_COMP_REQ_SHIFT;
+
+		tmp |= AL_REG_FIELD_GET(reg2,
+			UDMA_S2M_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_MASK,
+			UDMA_S2M_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_SHIFT)
+			<< UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_SHIFT;
+
+		al_reg_write32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_wr, tmp);
+
+		tmp = al_reg_read32(&udma->udma_regs->s2m.s2m_comp.cfg_1c);
+		tmp &= ~UDMA_S2M_COMP_CFG_1C_ACK_FIFO_DEPTH_MASK;
+		tmp |= AL_REG_FIELD_GET(reg2,
+			UDMA_S2M_FEATURE_REG_6_MAX_STREAM_ACK_MASK,
+			UDMA_S2M_FEATURE_REG_6_MAX_STREAM_ACK_SHIFT) << UDMA_S2M_COMP_CFG_1C_ACK_FIFO_DEPTH_SHIFT;
+		al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_1c, tmp);
+
+		/* Set AXI defaults */
+		reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.comp_wr_cfg_1);
+		reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_MASK;
+		reg |= 0x1 << UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_SHIFT;
+		reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_MASK;
+		reg |= 0x2 << UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_SHIFT;
+		al_reg_write32(&udma->udma_regs->s2m.axi_s2m.comp_wr_cfg_1, reg);
+
+		reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_4);
+		reg &= ~UDMA_AXI_S2M_DESC_RD_CFG_4_ARBURST_MASK;
+		reg |= 0x1 << UDMA_AXI_S2M_DESC_RD_CFG_4_ARBURST_SHIFT;
+		al_reg_write32(&udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_4, reg);
+
+		reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.data_wr_cfg_1);
+		reg &= ~UDMA_AXI_S2M_DATA_WR_CFG_1_AWBURST_MASK;
+		reg |= 0x1 << UDMA_AXI_S2M_DATA_WR_CFG_1_AWBURST_SHIFT;
+		al_reg_write32(&udma->udma_regs->s2m.axi_s2m.data_wr_cfg_1, reg);
+
+		/* Set comp fifo depth */
+		reg = al_reg_read32(&udma->udma_regs->s2m.s2m_comp.cfg_2c);
+		reg &= ~UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_MASK;
+		reg |= 0x20 << UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_SHIFT;
+		al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_2c, reg);
+
+		/* data fifo depth */
+		reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_wr);
+		reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_MASK;
+		reg |= 0x20 << UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_SHIFT;
+		al_reg_write32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_wr, reg);
+
+		/* Ignore pkt Len Error */
+		al_reg_write32(&udma->udma_regs->s2m.s2m.err_abort_mask, 1 << 24);
+	}
+#endif
+}
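+
+/*
+ * Illustrative sketch, not part of the original HAL sources: the
+ * read-modify-write sequences above can equivalently be expressed with
+ * al_reg_write32_masked(), which this HAL uses elsewhere. Shown for the
+ * prefetch-threshold default; the AL_UDMA_DOC_EXAMPLE guard is hypothetical
+ * and never defined, so this is documentation only.
+ */
+#ifdef AL_UDMA_DOC_EXAMPLE
+static void example_set_pref_thr(struct al_udma *udma, uint32_t thr)
+{
+	/* update only the PREF_THR field, leaving the other bits untouched */
+	al_reg_write32_masked(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3,
+		UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK,
+		thr << UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT);
+}
+#endif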
+
+/**
+ * misc queue configurations
+ *
+ * @param udma_q udma queue data structure
+ *
+ * @return 0
+ */
+static int al_udma_q_config(struct al_udma_q *udma_q)
+{
+	uint32_t *reg_addr;
+	uint32_t val;
+
+	if (udma_q->udma->type == UDMA_TX) {
+		reg_addr = &udma_q->q_regs->m2s_q.rlimit.mask;
+
+		val = al_reg_read32(reg_addr);
+		/* enable DMB */
+		val &= ~UDMA_M2S_Q_RATE_LIMIT_MASK_INTERNAL_PAUSE_DMB;
+		al_reg_write32(reg_addr, val);
+	}
+	return 0;
+}
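+
+/*
+ * Illustrative sketch, not part of the original HAL sources: the per-queue
+ * AL_UDMA_Q_FLAGS_* bits defined above drive al_udma_q_config_compl() below.
+ * A caller that keeps completion ring updates and wants coalescing enabled
+ * could prepare a queue's flags as sketched here (guard macro and helper name
+ * are hypothetical):
+ */
+#ifdef AL_UDMA_DOC_EXAMPLE
+static void example_q_flags_setup(struct al_udma_q *udma_q)
+{
+	/* keep completion ring updates, enable completion coalescing */
+	udma_q->flags &= ~AL_UDMA_Q_FLAGS_NO_COMP_UPDATE;
+	udma_q->flags |= AL_UDMA_Q_FLAGS_EN_COMP_COAL;
+}
+#endif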
+
+/**
+ * set the queue's completion configuration register
+ *
+ * @param udma_q udma queue data structure
+ *
+ * @return 0
+ */
+static int al_udma_q_config_compl(struct al_udma_q *udma_q)
+{
+	uint32_t *reg_addr;
+	uint32_t val;
+
+	if (udma_q->udma->type == UDMA_TX)
+		reg_addr = &udma_q->q_regs->m2s_q.comp_cfg;
+	else
+		reg_addr = &udma_q->q_regs->s2m_q.comp_cfg;
+
+	val = al_reg_read32(reg_addr);
+
+	if (udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE)
+		val &= ~UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;
+	else
+		val |= UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;
+
+	if (udma_q->flags & AL_UDMA_Q_FLAGS_EN_COMP_COAL)
+		val &= ~UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;
+	else
+		val |= UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;
+
+	al_reg_write32(reg_addr, val);
+
+	/* set the completion queue size */
+	if (udma_q->udma->type == UDMA_RX) {
+		val = al_reg_read32(
+			&udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c);
+		val &= ~UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
+		/* the register expects it to be in words */
+		val |= (udma_q->cdesc_size >> 2)
+			& UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
+		al_reg_write32(&udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c,
+			val);
+	}
+	return 0;
+}
+
+/**
+ * reset the queue's pointers (head, tail, etc.) and set the base addresses
+ *
+ * @param udma_q udma queue data structure
+ */
+static int al_udma_q_set_pointers(struct al_udma_q *udma_q)
+{
+	/* reset the descriptors ring pointers */
+	/* assert descriptor base address aligned. */
+	al_assert((AL_ADDR_LOW(udma_q->desc_phy_base) &
+		   ~UDMA_M2S_Q_TDRBP_LOW_ADDR_MASK) == 0);
+	al_reg_write32(&udma_q->q_regs->rings.drbp_low,
+		       AL_ADDR_LOW(udma_q->desc_phy_base));
+	al_reg_write32(&udma_q->q_regs->rings.drbp_high,
+		       AL_ADDR_HIGH(udma_q->desc_phy_base));
+
+	al_reg_write32(&udma_q->q_regs->rings.drl, udma_q->size);
+
+	/* if completion ring update disabled */
+	if (udma_q->cdesc_base_ptr == NULL) {
+		udma_q->flags |= AL_UDMA_Q_FLAGS_NO_COMP_UPDATE;
+	} else {
+		/* reset the completion descriptors ring pointers */
+		/* assert completion base address aligned. */
+		al_assert((AL_ADDR_LOW(udma_q->cdesc_phy_base) &
+			   ~UDMA_M2S_Q_TCRBP_LOW_ADDR_MASK) == 0);
+		al_reg_write32(&udma_q->q_regs->rings.crbp_low,
+			       AL_ADDR_LOW(udma_q->cdesc_phy_base));
+		al_reg_write32(&udma_q->q_regs->rings.crbp_high,
+			       AL_ADDR_HIGH(udma_q->cdesc_phy_base));
+
+		al_udma_q_config_compl(udma_q);
+	}
+	return 0;
+}
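+
+/*
+ * Illustrative sketch, not part of the original HAL sources: the ring base
+ * programming above splits a (possibly 64 bit) physical address with
+ * AL_ADDR_LOW()/AL_ADDR_HIGH(); the low part must be aligned as checked by
+ * the al_assert() above. A caller-side pre-check could look like this (guard
+ * macro and helper name are hypothetical):
+ */
+#ifdef AL_UDMA_DOC_EXAMPLE
+static al_bool example_desc_base_is_aligned(uint64_t desc_phy_base)
+{
+	/* the descriptor ring base must satisfy the TDRBP low-address mask */
+	return ((AL_ADDR_LOW(desc_phy_base) &
+			~UDMA_M2S_Q_TDRBP_LOW_ADDR_MASK) == 0) ?
+		AL_TRUE : AL_FALSE;
+}
+#endif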
base %p\n", udma->name, + udma->udma_regs); + return 0; +} + +/* + * Initialize the udma queue data structure + */ +int al_udma_q_init(struct al_udma *udma, uint32_t qid, + struct al_udma_q_params *q_params) +{ + struct al_udma_q *udma_q; + + al_assert(udma); + al_assert(q_params); + + if (qid >= udma->num_of_queues) { + al_err("udma: invalid queue id (%d)\n", qid); + return -EINVAL; + } + + if (udma->udma_q[qid].status == AL_QUEUE_ENABLED) { + al_err("udma: queue (%d) already enabled!\n", qid); + return -EIO; + } + + if (q_params->size < AL_UDMA_MIN_Q_SIZE) { + al_err("udma: queue (%d) size too small\n", qid); + return -EINVAL; + } + + if (q_params->size > AL_UDMA_MAX_Q_SIZE) { + al_err("udma: queue (%d) size too large\n", qid); + return -EINVAL; + } + + if (q_params->size & (q_params->size - 1)) { + al_err("udma: queue (%d) size (%d) must be power of 2\n", + q_params->size, qid); + return -EINVAL; + } + + udma_q = &udma->udma_q[qid]; + /* set the queue's regs base address */ + if (udma->type == UDMA_TX) + udma_q->q_regs = (union udma_q_regs __iomem *) + &udma->udma_regs->m2s.m2s_q[qid]; + else + udma_q->q_regs = (union udma_q_regs __iomem *) + &udma->udma_regs->s2m.s2m_q[qid]; + + udma_q->dev_id = q_params->dev_id; + udma_q->rev_id = q_params->rev_id; + udma_q->size = q_params->size; + udma_q->size_mask = q_params->size - 1; + udma_q->desc_base_ptr = q_params->desc_base; + udma_q->desc_phy_base = q_params->desc_phy_base; + udma_q->cdesc_base_ptr = q_params->cdesc_base; + udma_q->cdesc_phy_base = q_params->cdesc_phy_base; + udma_q->cdesc_size = q_params->cdesc_size; + + udma_q->next_desc_idx = 0; + udma_q->next_cdesc_idx = 0; + udma_q->end_cdesc_ptr = (uint8_t *) udma_q->cdesc_base_ptr + + (udma_q->size - 1) * udma_q->cdesc_size; + udma_q->comp_head_idx = 0; + udma_q->comp_head_ptr = (union al_udma_cdesc *)udma_q->cdesc_base_ptr; + udma_q->desc_ring_id = AL_UDMA_INITIAL_RING_ID; + udma_q->comp_ring_id = AL_UDMA_INITIAL_RING_ID; +#if 0 + udma_q->desc_ctrl_bits = AL_UDMA_INITIAL_RING_ID << + AL_M2S_DESC_RING_ID_SHIFT; +#endif + udma_q->pkt_crnt_descs = 0; + udma_q->flags = 0; + udma_q->status = AL_QUEUE_DISABLED; + udma_q->udma = udma; + udma_q->qid = qid; + + /* start hardware configuration: */ + al_udma_q_config(udma_q); + /* reset the queue pointers */ + al_udma_q_set_pointers(udma_q); + + /* enable the q */ + al_udma_q_enable(udma_q, 1); + + al_dbg("udma [%s %d]: %s q init. size 0x%x\n" + " desc ring info: phys base 0x%llx virt base %p\n" + " cdesc ring info: phys base 0x%llx virt base %p " + "entry size 0x%x", + udma_q->udma->name, udma_q->qid, + udma->type == UDMA_TX ? 
"Tx" : "Rx", + q_params->size, + (unsigned long long)q_params->desc_phy_base, + q_params->desc_base, + (unsigned long long)q_params->cdesc_phy_base, + q_params->cdesc_base, + q_params->cdesc_size); + + return 0; +} + +/* + * Reset a udma queue + */ +int al_udma_q_reset(struct al_udma_q *udma_q) +{ + unsigned int remaining_time = AL_UDMA_Q_RST_TOUT; + uint32_t *status_reg; + uint32_t *dcp_reg; + uint32_t *crhp_reg; + uint32_t *q_sw_ctrl_reg; + + al_assert(udma_q); + al_assert(udma_q->q_regs); + + /* De-assert scheduling and prefetch */ + al_udma_q_enable(udma_q, 0); + + /* Wait for scheduling and prefetch to stop */ + status_reg = &udma_q->q_regs->rings.status; + + while (remaining_time) { + uint32_t status = al_reg_read32(status_reg); + + if (!(status & (UDMA_M2S_Q_STATUS_PREFETCH | + UDMA_M2S_Q_STATUS_SCHEDULER))) + break; + + remaining_time--; + al_udelay(1); + } + + if (!remaining_time) { + al_err("udma [%s %d]: %s timeout waiting for prefetch and " + "scheduler disable\n", udma_q->udma->name, udma_q->qid, + __func__); + return -ETIME; + } + + /* Wait for the completion queue to reach to the same pointer as the + * prefetch stopped at ([TR]DCP == [TR]CRHP) */ + dcp_reg = &udma_q->q_regs->rings.dcp; + crhp_reg = &udma_q->q_regs->rings.crhp; + + while (remaining_time) { + uint32_t dcp = al_reg_read32(dcp_reg); + uint32_t crhp = al_reg_read32(crhp_reg); + + if (dcp == crhp) + break; + + remaining_time--; + al_udelay(1); + }; + + if (!remaining_time) { + al_err("udma [%s %d]: %s timeout waiting for dcp==crhp\n", + udma_q->udma->name, udma_q->qid, __func__); + return -ETIME; + } + + /* Assert the queue reset */ + if (udma_q->udma->type == UDMA_TX) + q_sw_ctrl_reg = &udma_q->q_regs->m2s_q.q_sw_ctrl; + else + q_sw_ctrl_reg = &udma_q->q_regs->s2m_q.q_sw_ctrl; + + al_reg_write32(q_sw_ctrl_reg, UDMA_M2S_Q_SW_CTRL_RST_Q); + + return 0; +} + +/* + * return (by reference) a pointer to a specific queue date structure. 
+ */ +int al_udma_q_handle_get(struct al_udma *udma, uint32_t qid, + struct al_udma_q **q_handle) +{ + + al_assert(udma); + al_assert(q_handle); + + if (unlikely(qid >= udma->num_of_queues)) { + al_err("udma [%s]: invalid queue id (%d)\n", udma->name, qid); + return -EINVAL; + } + *q_handle = &udma->udma_q[qid]; + return 0; +} + +/* + * Change the UDMA's state + */ +int al_udma_state_set(struct al_udma *udma, enum al_udma_state state) +{ + uint32_t reg; + + al_assert(udma != NULL); + if (state == udma->state) + al_dbg("udma [%s]: requested state identical to " + "current state (%d)\n", udma->name, state); + + al_dbg("udma [%s]: change state from (%s) to (%s)\n", + udma->name, al_udma_states_name[udma->state], + al_udma_states_name[state]); + + reg = 0; + switch (state) { + case UDMA_DISABLE: + reg |= UDMA_M2S_CHANGE_STATE_DIS; + break; + case UDMA_NORMAL: + reg |= UDMA_M2S_CHANGE_STATE_NORMAL; + break; + case UDMA_ABORT: + reg |= UDMA_M2S_CHANGE_STATE_ABORT; + break; + default: + al_err("udma: invalid state (%d)\n", state); + return -EINVAL; + } + + if (udma->type == UDMA_TX) + al_reg_write32(&udma->udma_regs->m2s.m2s.change_state, reg); + else + al_reg_write32(&udma->udma_regs->s2m.s2m.change_state, reg); + + udma->state = state; + return 0; +} + +/* + * return the current UDMA hardware state + */ +enum al_udma_state al_udma_state_get(struct al_udma *udma) +{ + uint32_t state_reg; + uint32_t comp_ctrl; + uint32_t stream_if; + uint32_t data_rd; + uint32_t desc_pref; + + if (udma->type == UDMA_TX) + state_reg = al_reg_read32(&udma->udma_regs->m2s.m2s.state); + else + state_reg = al_reg_read32(&udma->udma_regs->s2m.s2m.state); + + comp_ctrl = AL_REG_FIELD_GET(state_reg, + UDMA_M2S_STATE_COMP_CTRL_MASK, + UDMA_M2S_STATE_COMP_CTRL_SHIFT); + stream_if = AL_REG_FIELD_GET(state_reg, + UDMA_M2S_STATE_STREAM_IF_MASK, + UDMA_M2S_STATE_STREAM_IF_SHIFT); + data_rd = AL_REG_FIELD_GET(state_reg, + UDMA_M2S_STATE_DATA_RD_CTRL_MASK, + UDMA_M2S_STATE_DATA_RD_CTRL_SHIFT); + desc_pref = AL_REG_FIELD_GET(state_reg, + UDMA_M2S_STATE_DESC_PREF_MASK, + UDMA_M2S_STATE_DESC_PREF_SHIFT); + + al_assert(comp_ctrl != UDMA_STATE_RESERVED); + al_assert(stream_if != UDMA_STATE_RESERVED); + al_assert(data_rd != UDMA_STATE_RESERVED); + al_assert(desc_pref != UDMA_STATE_RESERVED); + + /* if any of the states is abort then return abort */ + if ((comp_ctrl == UDMA_STATE_ABORT) || (stream_if == UDMA_STATE_ABORT) + || (data_rd == UDMA_STATE_ABORT) + || (desc_pref == UDMA_STATE_ABORT)) + return UDMA_ABORT; + + /* if any of the states is normal then return normal */ + if ((comp_ctrl == UDMA_STATE_NORMAL) + || (stream_if == UDMA_STATE_NORMAL) + || (data_rd == UDMA_STATE_NORMAL) + || (desc_pref == UDMA_STATE_NORMAL)) + return UDMA_NORMAL; + + return UDMA_IDLE; +} + +/* + * Action handling + */ + +/* + * get next completed packet from completion ring of the queue + */ +uint32_t al_udma_cdesc_packet_get( + struct al_udma_q *udma_q, + volatile union al_udma_cdesc **cdesc) +{ + uint32_t count; + volatile union al_udma_cdesc *curr; + uint32_t comp_flags; + + /* this function requires the completion ring update */ + al_assert(!(udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE)); + + /* comp_head points to the last comp desc that was processed */ + curr = udma_q->comp_head_ptr; + comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta); + + /* check if the completion descriptor is new */ + if (unlikely(al_udma_new_cdesc(udma_q, comp_flags) == AL_FALSE)) + return 0; + /* if new desc found, increment the current packets descriptors */ + 
+	count = udma_q->pkt_crnt_descs + 1;
+	while (!cdesc_is_last(comp_flags)) {
+		curr = al_cdesc_next_update(udma_q, curr);
+		comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);
+		if (unlikely(al_udma_new_cdesc(udma_q, comp_flags)
+								== AL_FALSE)) {
+			/*
+			 * The current packet doesn't have all of its
+			 * descriptors completed yet. Log the current desc
+			 * location and the number of descriptors completed
+			 * so far, then return.
+			 */
+			udma_q->pkt_crnt_descs = count;
+			udma_q->comp_head_ptr = curr;
+			return 0;
+		}
+		count++;
+		/* check against max descs per packet. */
+		al_assert(count <= udma_q->size);
+	}
+	/* return back the first descriptor of the packet */
+	*cdesc = al_udma_cdesc_idx_to_ptr(udma_q, udma_q->next_cdesc_idx);
+	udma_q->pkt_crnt_descs = 0;
+	udma_q->comp_head_ptr = al_cdesc_next_update(udma_q, curr);
+
+	al_dbg("udma [%s %d]: packet completed. first desc %p (idx 0x%x)"
+		" descs %d\n", udma_q->udma->name, udma_q->qid, *cdesc,
+		udma_q->next_cdesc_idx, count);
+
+	return count;
+}
+
+/**
+ * enable/disable udma queue
+ *
+ * @param udma_q udma queue data structure
+ * @param enable a nonzero value enables the queue, zero disables it
+ *
+ * @return 0
+ */
+int al_udma_q_enable(struct al_udma_q *udma_q, int enable)
+{
+	uint32_t reg;
+
+	al_assert(udma_q);
+	al_assert(udma_q->q_regs);
+
+	reg = al_reg_read32(&udma_q->q_regs->rings.cfg);
+
+	if (enable) {
+		reg |= (UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING);
+		udma_q->status = AL_QUEUE_ENABLED;
+	} else {
+		reg &= ~(UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING);
+		udma_q->status = AL_QUEUE_DISABLED;
+	}
+	al_reg_write32(&udma_q->q_regs->rings.cfg, reg);
+	return 0;
+}
+
+/** @} end of UDMA group */
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_init_ccu_regs.h b/target/linux/alpine/files/arch/arm/mach-alpine/al_init_ccu_regs.h
new file mode 100644
index 00000000000000..4a9f694f79a8fb
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_init_ccu_regs.h
@@ -0,0 +1,46 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +#ifndef __AL_HAL_CCU_H__ +#define __AL_HAL_CCU_H__ + + +#define AL_CCU_SNOOP_CONTROL_IOFAB_0_OFFSET 0x4000 +#define AL_CCU_SNOOP_CONTROL_IOFAB_1_OFFSET 0x5000 +#define AL_CCU_SPECULATION_CONTROL_OFFSET 0x4 +#define AL_CCU_SECURE_ACCESS_OFFSET 0x8 + + +#endif /* __AL_HAL_CCU_H__ */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_init_cpu_resume.h b/target/linux/alpine/files/arch/arm/mach-alpine/al_init_cpu_resume.h new file mode 100644 index 00000000000000..8b9c1dbf544f6d --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_init_cpu_resume.h @@ -0,0 +1,57 @@ +/* + * Annapurna labs resume address. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef AL_CPU_RESUME_H_ +#define AL_CPU_RESUME_H_ + +struct al_cpu_resume_regs_per_cpu { + /* Flags */ + uint32_t flags; + + /* Resume address */ + uintptr_t resume_addr; +}; + +struct al_cpu_resume_regs { + /* Watermark for validating the CPU resume struct */ + uint32_t watermark; + + /* Various flags to control the resume behavior */ + uint32_t flags; + + /* Per cpu regs */ + struct al_cpu_resume_regs_per_cpu per_cpu[]; +}; + +/* The expected magic number for validating the resume addresses */ +#define AL_CPU_RESUME_MAGIC_NUM 0xf0e1d200 +#define AL_CPU_RESUME_MAGIC_NUM_MASK 0xffffff00 + +/* The expected minimal version number for validating the capabilities */ +#define AL_CPU_RESUME_MIN_VER 0x000000c3 +#define AL_CPU_RESUME_MIN_VER_MASK 0x000000ff + +/* General resume flags*/ +#define AL_CPU_RESUME_FLG_SWITCH_TO_NS_DIS (1 << 0) + +/* Per-cpu resume flags */ +/* Don't init anything outside the cluster */ +#define AL_CPU_RESUME_FLG_PERCPU_EXTERNAL_SKIP (1 << 0) +/* Don't init anything outside the core */ +#define AL_CPU_RESUME_FLG_PERCPU_CLUSTER_SKIP (2 << 0) + +#endif /* AL_CPU_RESUME_H_ */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_init_sys_fabric.c b/target/linux/alpine/files/arch/arm/mach-alpine/al_init_sys_fabric.c new file mode 100644 index 00000000000000..6d421dc007f3b1 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_init_sys_fabric.c @@ -0,0 +1,86 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. 
+ +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +#include +#include +#include "al_hal_nb_regs.h" +#include "al_init_sys_fabric.h" +#include "al_init_ccu_regs.h" +#include "al_init_sys_fabric_offsets.h" + +/* definition currently missing from nb_regs */ +#define AL_NB_ACF_MISC_READ_BYPASS (1 << 30) + +/* initialization of different units */ +void al_nbservice_init(void __iomem *nb_regs_address, + al_bool dev_ord_relax) +{ + struct al_nb_regs __iomem *nb_regs = nb_regs_address; + + /* allow reads to bypass writes to different addresses */ + al_reg_write32_masked( + &(nb_regs->global.acf_misc), + AL_NB_ACF_MISC_READ_BYPASS, + (dev_ord_relax) ? 
AL_NB_ACF_MISC_READ_BYPASS : 0);
+}
+
+void al_ccu_init(void __iomem *ccu_address, al_bool iocc)
+{
+	/* enable snoop */
+	if (iocc) {
+		al_reg_write32(ccu_address +
+			AL_CCU_SNOOP_CONTROL_IOFAB_0_OFFSET, 1);
+		al_reg_write32(ccu_address +
+			AL_CCU_SNOOP_CONTROL_IOFAB_1_OFFSET, 1);
+	}
+	/* disable speculative fetches from masters */
+	al_reg_write32(ccu_address + AL_CCU_SPECULATION_CONTROL_OFFSET, 7);
+}
+
+void al_nbservice_clear_settings(void __iomem *nb_regs_address)
+{
+	struct al_nb_regs __iomem *nb_regs = nb_regs_address;
+
+	al_reg_write32_masked(
+		&(nb_regs->global.acf_misc),
+		AL_NB_ACF_MISC_READ_BYPASS,
+		0);
+}
+
+void al_ccu_clear_settings(void __iomem *ccu_address)
+{
+	al_reg_write32(ccu_address + AL_CCU_SNOOP_CONTROL_IOFAB_0_OFFSET, 0);
+	al_reg_write32(ccu_address + AL_CCU_SNOOP_CONTROL_IOFAB_1_OFFSET, 0);
+}
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_init_sys_fabric.h b/target/linux/alpine/files/arch/arm/mach-alpine/al_init_sys_fabric.h
new file mode 100644
index 00000000000000..bb04c9369deb39
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_init_sys_fabric.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * The fabric consists of the CCU, SMMU (*2), GIC (*2), and NB registers.
+ *
+ * This fabric HAL provides a simple API for fabric initialization rather
+ * than complete coverage of the units' functionality. Its main objective
+ * is to keep fabric initialization consistent between different
+ * environments, so only functions used by all environments are included.
+ *
+ * The GIC and SMMU are handled only when initializing the secure context.
+ * Both the internal and the external GIC are initialized in that case.
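+ *
+ * Hedged usage sketch (added for illustration; ccu_base and nb_base are
+ * assumed to be ioremapped register bases, not names from this header).
+ * A typical secure-context init with I/O cache coherency enabled and
+ * relaxed device read ordering would be:
+ *   al_ccu_init(ccu_base, AL_TRUE);
+ *   al_nbservice_init(nb_base, AL_TRUE);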
+ */ + +#ifndef __AL_HAL_SYS_FABRIC_H__ +#define __AL_HAL_SYS_FABRIC_H__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Initialize NB service regs + * + * @param nb_regs_address + * address of nb service registers + * @param dev_ord_relax + * optimization: relax ordering between device-access reads and + * writes to different addresses. + */ +void al_nbservice_init(void __iomem *nb_regs_address, + al_bool dev_ord_relax); + +/** + * Initialize CCU + * + * @param ccu_address + * address of ccu registers + * @param iocc + * enable I/O cache coherency + */ +void al_ccu_init(void __iomem *ccu_address, al_bool iocc); + +/** + * Clear NB service regs settings + * + * @param nb_regs_address + * address of nb service registers + */ +void al_nbservice_clear_settings(void __iomem *nb_regs_address); + +/** + * Clear ccu settings + * + * @param ccu_address + * address of ccu registers + */ +void al_ccu_clear_settings(void __iomem *ccu_address); + + +#ifdef __cplusplus +} +#endif + +#endif /* AL_HAL_SYS_FABRIC_H_ */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/al_init_sys_fabric_offsets.h b/target/linux/alpine/files/arch/arm/mach-alpine/al_init_sys_fabric_offsets.h new file mode 100644 index 00000000000000..eb624f262566f9 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/al_init_sys_fabric_offsets.h @@ -0,0 +1,51 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*******************************************************************************/ + +#ifndef __AL_HAL_SYS_FABRIC_OFFSETS_H__ +#define __AL_HAL_SYS_FABRIC_OFFSETS_H__ + +/* + * Offsets of System-Fabric sub-units + * Inside AL northbridge-PASW + */ + +#define AL_NB_SMMU0_OFFSET 0x30000 /*SMMU for I/O fabric 0 */ +#define AL_NB_SMMU1_OFFSET 0x40000 /*SMMU for I/O fabric 1 */ +#define AL_NB_SERVICE_OFFSET 0x70000 +#define AL_NB_CCU_OFFSET 0x90000 +#define AL_NB_GIC_OFFSET(id) (0 + (id)*0x8000) + + +#endif /* __AL_HAL_SYS_FABRIC_OFFSETS_H__ */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/alpine_cpu_pm.c b/target/linux/alpine/files/arch/arm/mach-alpine/alpine_cpu_pm.c new file mode 100644 index 00000000000000..61a7bb13291689 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/alpine_cpu_pm.c @@ -0,0 +1,191 @@ +/* + * Alpine CPU Power Management Services + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include "al_init_cpu_resume.h" + +/* NB registers */ +#undef AL_NB_SERVICE_BASE +#define AL_NB_SERVICE_BASE al_nb_service_base +#define AL_NB_INIT_CONTROL (AL_NB_SERVICE_BASE + 0x8) +#define AL_NB_POWER_CONTROL(cpu) (AL_NB_SERVICE_BASE + \ + 0x2000 + (cpu)*0x100 + 0x20) +#define AL_NB_POWER_STATUS(cpu) (AL_NB_SERVICE_BASE + \ + 0x2000 + (cpu)*0x100 + 0x24) + +int alpine_suspend_finish(unsigned long); + +static void __iomem *al_nb_service_base; +static struct al_cpu_resume_regs __iomem *al_cpu_resume_regs; +static int suspend_wakeup_supported; + +int alpine_cpu_suspend_wakeup_supported(void) +{ + return suspend_wakeup_supported; +} +EXPORT_SYMBOL(alpine_cpu_suspend_wakeup_supported); + +void alpine_cpu_wakeup(unsigned int cpu, uintptr_t resume_addr) +{ + /* + * Cancel previous powerdown request + * This can happen if the CPU is "hot plugged in" after being powered + * off due to being "hot plugged out" - see 'alpine_cpu_die' below. + */ + writel(0, (void __iomem *)AL_NB_POWER_CONTROL(cpu)); + + /* Set CPU resume address */ + writel(resume_addr, &al_cpu_resume_regs->per_cpu[cpu].resume_addr); + + /* Release from reset - has effect once per SoC reset */ + writel(readl(AL_NB_INIT_CONTROL) | (1 << cpu), AL_NB_INIT_CONTROL); +} +EXPORT_SYMBOL(alpine_cpu_wakeup); + +void alpine_cpu_die(unsigned int cpu) +{ + if (!suspend_wakeup_supported) { + pr_err("Annapurna Labs PM components not found\n"); + return; + } + + /* request powerdown. 
cpu will be turned off when it issues WFI */ + writel(0x3 | (0x3 << 20) | (0x3 << 18), (void __iomem *)AL_NB_POWER_CONTROL(cpu)); + + alpine_suspend_finish(0); + + BUG(); /*execution should never reach this point */ +} +EXPORT_SYMBOL(alpine_cpu_die); + +/* Needed by kexec and platform_can_cpu_hotplug() */ +int alpine_cpu_kill(unsigned int cpu) +{ + int k; + u32 val; + + /* Wait until the dying CPU enters the powerdown state. */ + for (k = 0; k < 100; k++) { + val = readl((void __iomem *)AL_NB_POWER_STATUS(cpu)); + if ((val & 0x3) == 0x3) { + /* + * We need a delay here to ensure that the dying CPU + * can reach the WFI state. + */ + msleep(5); + return 1; + } + msleep(1); + } + + return 0; +} +EXPORT_SYMBOL(alpine_cpu_kill); + +void alpine_cpu_suspend(void) +{ + unsigned int cpu = smp_processor_id(); + + /* Write the resume address */ + writel(virt_to_phys(cpu_resume), + &al_cpu_resume_regs->per_cpu[cpu].resume_addr); + writel(AL_CPU_RESUME_FLG_PERCPU_CLUSTER_SKIP, + &al_cpu_resume_regs->per_cpu[cpu].flags); + + /* request powerdown. cpu will be turned off when it issues WFI + * bits 0:1 - request core powerdown + * bits 20:21 - do not wake-up from i/o gic + */ + writel(0x3 | (3<<20), (void __iomem *)AL_NB_POWER_CONTROL(cpu)); + /*verify the write got through*/ + readl((void __iomem *)AL_NB_POWER_CONTROL(cpu)); + + cpu_pm_enter(); + cpu_suspend(0, alpine_suspend_finish); + + /*clear the powerdown request*/ + writel(0, (void __iomem *)AL_NB_POWER_CONTROL(cpu)); + /*verify the write got through*/ + readl((void __iomem *)AL_NB_POWER_CONTROL(cpu)); + + cpu_pm_exit(); +} +EXPORT_SYMBOL(alpine_cpu_suspend); + +#ifdef CONFIG_PM +static int al_pm_valid(suspend_state_t state) +{ + return ((state == PM_SUSPEND_STANDBY) || (state == PM_SUSPEND_MEM)); +} + +static int al_pm_enter(suspend_state_t state) +{ + if (al_pm_valid(state)) + alpine_cpu_suspend(); + else + return -EINVAL; + + return 0; +} + +static const struct platform_suspend_ops al_pm_ops = { + .enter = al_pm_enter, + .valid = al_pm_valid, +}; +#endif + +void __init alpine_cpu_pm_init(void) +{ + struct device_node *np; + + np = of_find_compatible_node( + NULL, NULL, "annapurna-labs,al-nb-service"); + al_nb_service_base = of_iomap(np, 0); + + np = of_find_compatible_node( + NULL, NULL, "annapurna-labs,al-cpu-resume"); + al_cpu_resume_regs = + (struct al_cpu_resume_regs __iomem *)of_iomap(np, 0); + + suspend_wakeup_supported = + al_nb_service_base && + al_cpu_resume_regs && + ((readl(&al_cpu_resume_regs->watermark) & + AL_CPU_RESUME_MAGIC_NUM_MASK) == AL_CPU_RESUME_MAGIC_NUM) && + ((readl(&al_cpu_resume_regs->watermark) & + AL_CPU_RESUME_MIN_VER_MASK) >= AL_CPU_RESUME_MIN_VER); + +#ifdef CONFIG_PM + if (suspend_wakeup_supported) { + suspend_set_ops(&al_pm_ops); + } else { + pr_err("Annapurna Labs PM components not found\n"); + return; + } +#endif +} + diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/alpine_cpu_pm.h b/target/linux/alpine/files/arch/arm/mach-alpine/alpine_cpu_pm.h new file mode 100644 index 00000000000000..5179e697c492fe --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/alpine_cpu_pm.h @@ -0,0 +1,26 @@ +/* + * Low-level power-management support for Alpine platform. + * + * Copyright (C) 2015 Annapurna Labs Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ALPINE_CPU_PM_H__
+#define __ALPINE_CPU_PM_H__
+
+/* Alpine CPU Power Management Services Initialization */
+void alpine_cpu_pm_init(void);
+
+/* Wake-up a CPU */
+void alpine_cpu_wakeup(unsigned int phys_cpu, uintptr_t phys_resume_addr);
+
+#endif /* __ALPINE_CPU_PM_H__ */
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/alpine_machine.c b/target/linux/alpine/files/arch/arm/mach-alpine/alpine_machine.c
new file mode 100644
index 00000000000000..ebc3d5ef6bd949
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/alpine_machine.c
@@ -0,0 +1,323 @@
+/*
+ * Device Tree support for Alpine platforms.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include "core.h"
+
+#define WDTLOAD		0x000
+	#define LOAD_MIN	0x00000001
+	#define LOAD_MAX	0xFFFFFFFF
+#define WDTVALUE	0x004
+#define WDTCONTROL	0x008
+	/* control register masks */
+	#define INT_ENABLE	(1 << 0)
+	#define RESET_ENABLE	(1 << 1)
+#define WDTLOCK		0xC00
+	#define UNLOCK		0x1ACCE551
+	#define LOCK		0x00000001
+
+#define SERDES_NUM_GROUPS	4
+#define SERDES_GROUP_SIZE	0x400
+
+static void __iomem *wd0_base;
+static void __iomem *serdes_base;
+
+static const __initconst struct of_device_id clk_match[] = {
+	{ .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
+	{}
+};
+
+static void __init al_timer_init(void)
+{
+	struct device_node *np;
+
+	/* Find the first watchdog and make sure it is not disabled */
+	np = of_find_compatible_node(NULL, NULL, "arm,sp805");
+
+	if (np && of_device_is_available(np)) {
+		wd0_base = of_iomap(np, 0);
+		BUG_ON(!wd0_base);
+	} else {
+		wd0_base = NULL;
+	}
+
+	/* Timer initialization */
+	of_clk_init(NULL);
+	timer_probe();
+}
+
+static void al_power_off(void)
+{
+	printk(KERN_EMERG "Unable to shutdown\n");
+}
+
+static void al_restart(enum reboot_mode mode, const char *cmd)
+{
+	if (!wd0_base) {
+		pr_err("%s: Not supported!\n", __func__);
+	} else {
+		writel(UNLOCK, wd0_base + WDTLOCK);
+		writel(LOAD_MIN, wd0_base + WDTLOAD);
+		writel(INT_ENABLE | RESET_ENABLE, wd0_base + WDTCONTROL);
+	}
+
+	while (1)
+		;
+}
+
+static void __init al_map_io(void)
+{
+	/* Needed for early printk to work */
+	struct map_desc uart_map_desc[1];
+
+	uart_map_desc[0].virtual = (unsigned long)AL_UART_BASE(0);
+	uart_map_desc[0].pfn = __phys_to_pfn(AL_UART_BASE(0));
+	uart_map_desc[0].length = SZ_64K;
+	uart_map_desc[0].type = MT_DEVICE;
+
+	iotable_init(uart_map_desc,
ARRAY_SIZE(uart_map_desc)); +} + +static void __init al_init_irq(void) +{ + irqchip_init(); + + if (al_msix_init() != 0) + pr_err("%s: al_msix_init() failed!\n", __func__); +} + +static void __init al_serdes_resource_init(void) +{ + struct device_node *np; + + /* Find the serdes node and make sure it is not disabled */ + np = of_find_compatible_node(NULL, NULL, "annapurna-labs,al-serdes"); + + if (np && of_device_is_available(np)) { + serdes_base = of_iomap(np, 0); + BUG_ON(!serdes_base); + } else { + pr_err("%s: init serdes regs base failed!\n", __func__); + serdes_base = NULL; + } +} + +void __iomem *alpine_serdes_resource_get(u32 group) +{ + void __iomem *base = NULL; + + if (group >= SERDES_NUM_GROUPS) + return NULL; + + if (serdes_base) + base = serdes_base + group * SERDES_GROUP_SIZE; + + return base; +} +EXPORT_SYMBOL(alpine_serdes_resource_get); + +static struct alpine_serdes_eth_group_mode { + struct mutex lock; + enum alpine_serdes_eth_mode mode; + bool mode_set; +} alpine_serdes_eth_group_mode[SERDES_NUM_GROUPS] = { + { + .lock = __MUTEX_INITIALIZER(alpine_serdes_eth_group_mode[0].lock), + .mode_set = false, + }, + { + .lock = __MUTEX_INITIALIZER(alpine_serdes_eth_group_mode[1].lock), + .mode_set = false, + }, + { + .lock = __MUTEX_INITIALIZER(alpine_serdes_eth_group_mode[2].lock), + .mode_set = false, + }, + { + .lock = __MUTEX_INITIALIZER(alpine_serdes_eth_group_mode[3].lock), + .mode_set = false, + }}; + +int alpine_serdes_eth_mode_set( + u32 group, + enum alpine_serdes_eth_mode mode) +{ + struct alpine_serdes_eth_group_mode *group_mode = + &alpine_serdes_eth_group_mode[group]; + + if (!serdes_base) + return -EINVAL; + + if (group >= SERDES_NUM_GROUPS) + return -EINVAL; + + mutex_lock(&group_mode->lock); + + if (!group_mode->mode_set || (group_mode->mode != mode)) { + struct al_serdes_obj obj; + struct al_serdes_adv_tx_params tx_params[AL_SRDS_NUM_LANES]; + struct al_serdes_adv_rx_params rx_params[AL_SRDS_NUM_LANES]; + int i; + + al_serdes_handle_init(serdes_base, &obj); + + /* save group params */ + for (i = 0; i < AL_SRDS_NUM_LANES; i++) { + al_serdes_tx_advanced_params_get( + &obj, + group, + i, + &tx_params[i]); + al_serdes_rx_advanced_params_get( + &obj, + group, + i, + &rx_params[i]); + } + + if (mode == ALPINE_SERDES_ETH_MODE_SGMII) + al_serdes_mode_set_sgmii(&obj, group); + else + al_serdes_mode_set_kr(&obj, group); + + /* restore group params */ + for (i = 0; i < AL_SRDS_NUM_LANES; i++) { + al_serdes_tx_advanced_params_set( + &obj, + group, + i, + &tx_params[i]); + al_serdes_rx_advanced_params_set( + &obj, + group, + i, + &rx_params[i]); + } + + group_mode->mode = mode; + group_mode->mode_set = true; + } + + mutex_unlock(&group_mode->lock); + + return 0; +} +EXPORT_SYMBOL(alpine_serdes_eth_mode_set); + +void alpine_serdes_eth_group_lock(u32 group) +{ + struct alpine_serdes_eth_group_mode *group_mode = + &alpine_serdes_eth_group_mode[group]; + + mutex_lock(&group_mode->lock); +} +EXPORT_SYMBOL(alpine_serdes_eth_group_lock); + +void alpine_serdes_eth_group_unlock(u32 group) +{ + struct alpine_serdes_eth_group_mode *group_mode = + &alpine_serdes_eth_group_mode[group]; + + mutex_unlock(&group_mode->lock); +} +EXPORT_SYMBOL(alpine_serdes_eth_group_unlock); + +static void __init al_init(void) +{ + pm_power_off = al_power_off; + + /* + * Power Management Services Initialization + * When running in SMP this should be done earlier + */ +#ifndef CONFIG_SMP + alpine_cpu_pm_init(); +#endif + + /* fabric uses a notifier for device registration, + * Hence it must be initialized 
before registering + * any devices + **/ + al_fabric_init(); + + al_serdes_resource_init(); + + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); +} + +static const char *al_match[] __initdata = { + "annapurna-labs,alpine", + NULL, +}; + +unsigned int al_spin_lock_wfe_enable __read_mostly = 0; +EXPORT_SYMBOL(al_spin_lock_wfe_enable); + +static int __init spin_lock_wfe_enable(char *str) +{ + get_option(&str, &al_spin_lock_wfe_enable); + if (al_spin_lock_wfe_enable) + al_spin_lock_wfe_enable = 1; + return 0; +} + +early_param("spin_lock_wfe_enable", spin_lock_wfe_enable); + +unsigned int al_gettimeofday_use_jiffies __read_mostly = 0; +EXPORT_SYMBOL(al_gettimeofday_use_jiffies); + +static int __init gettimeofday_use_jiffies(char *str) +{ + get_option(&str, &al_gettimeofday_use_jiffies); + if (al_gettimeofday_use_jiffies) + al_gettimeofday_use_jiffies = 1; + return 0; +} + +early_param("gettimeofday_use_jiffies", gettimeofday_use_jiffies); + +DT_MACHINE_START(AL_DT, "AnnapurnaLabs Alpine (Device Tree)") + .smp = smp_ops(al_smp_ops), + .map_io = al_map_io, + .init_irq = al_init_irq, + .init_time = al_timer_init, + .init_machine = al_init, + .dt_compat = al_match, + .restart = al_restart, +MACHINE_END diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/core.h b/target/linux/alpine/files/arch/arm/mach-alpine/core.h new file mode 100644 index 00000000000000..d4a050b009e0c5 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/core.h @@ -0,0 +1,29 @@ +/* + * linux/arch/arm/mach-alpine/core.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +extern struct smp_operations __initdata al_smp_ops; + +#ifdef CONFIG_PCI_MSI +int al_msix_init(void); +#else +static inline int al_msix_init(void) +{ + return 0; +} +#endif + diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_fabric.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_fabric.h new file mode 100644 index 00000000000000..45640cc54dca45 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_fabric.h @@ -0,0 +1,124 @@ +/* + * linux/arch/arm/mach-alpine/include/mach/al_fabric.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __AL_FABRIC_H__ +#define __AL_FABRIC_H__ + +/* + * North Bridge cause interrupt register definitions + */ +/* How many SYS Fabric IRQ Group instances exist in the system */ +#define AL_FABRIC_INSTANCE_N 4 + +/* Number of SYS Fabric IRQ */ +#define AL_FABRIC_IRQ_N 32 + +/* Cross trigger interrupt */ +#define AL_FABRIC_IRQ_NCTI_0 0 +#define AL_FABRIC_IRQ_NCTI_1 1 +#define AL_FABRIC_IRQ_NCTI_2 2 +#define AL_FABRIC_IRQ_NCTI_3 3 +/* Communications channel receive */ +#define AL_FABRIC_IRQ_COMMRX_0 4 +#define AL_FABRIC_IRQ_COMMRX_1 5 +#define AL_FABRIC_IRQ_COMMRX_2 6 +#define AL_FABRIC_IRQ_COMMRX_3 7 +/* Communication channel transmit */ +#define AL_FABRIC_IRQ_COMMTX_0 8 +#define AL_FABRIC_IRQ_COMMTX_1 9 +#define AL_FABRIC_IRQ_COMMTX_2 10 +#define AL_FABRIC_IRQ_COMMTX_3 11 +/* Write logging FIFO has valid transactions */ +#define AL_FABRIC_IRQ_WR_LOG_FIFO_VALID_M0 12 +/* Emulation write fifo log is wrapped */ +#define AL_FABRIC_IRQ_WR_LOG_FIFO_WRAP_A0 12 +/* Write logging FIFO wrap occurred */ +#define AL_FABRIC_IRQ_WR_LOG_FIFO_WRAP_M0 13 +/* Emulation write fifo log is full (new pushes might corrupt data) */ +#define AL_FABRIC_IRQ_WR_LOG_FIFO_FULL_A0 13 +/* Write logging FIFO is full */ +#define AL_FABRIC_IRQ_WR_LOG_FIFO_FULL_M0 14 +/* Emulation write fifo log is wrapped */ +#define AL_FABRIC_IRQ_WR_LOG_FIFO_WRAP_1_A0 14 +/* Reserved, read undefined must write as zeros. */ +#define AL_FABRIC_IRQ_RESERVED_15_15_M0 15 +/* Emulation write fifo log is full (new pushes might corrupt data) */ +#define AL_FABRIC_IRQ_WR_LOG_FIFO_FULL_1_A0 15 +/* Error indicator for AXI write transactions with a BRESP error */ +#define AL_FABRIC_IRQ_CPU_AXIERRIRQ 16 +/* Error indicator for: L2 RAM double-bit ECC error, illegal write */ +#define AL_FABRIC_IRQ_CPU_INTERRIRQ 17 +/* Coherent fabric error summary interrupt */ +#define AL_FABRIC_IRQ_ACF_ERRORIRQ 18 +/* DDR Controller ECC Correctable error summary interrupt */ +#define AL_FABRIC_IRQ_MCTL_ECC_CORR_ERR 19 +/* DDR Controller ECC Uncorrectable error summary interrupt */ +#define AL_FABRIC_IRQ_MCTL_ECC_UNCORR_ERR 20 +/* DRAM parity error interrupt */ +#define AL_FABRIC_IRQ_MCTL_PARITY_ERR 21 +/* Reserved, not functional */ +#define AL_FABRIC_IRQ_MCTL_WDATARAM_PAR 22 +/* Reserved */ +#define AL_FABRIC_IRQ_MCTL_RSVRD 23 +/* SB PoS error */ +#define AL_FABRIC_IRQ_SB_POS_ERR 24 +/* Received msix is not mapped to local GIC or IO-GIC spin */ +#define AL_FABRIC_IRQ_MSIX_ERR_INT 25 +/* Coresight timestamp overflow */ +#define AL_FABRIC_IRQ_CORESIGHT_TS_OVERFLOW 26 +/* Write data parity error from SB channel 0. */ +#define AL_FABRIC_IRQ_SB0_WRDATA_PERR 27 +/* Write data parity error from SB channel 1. */ +#define AL_FABRIC_IRQ_SB1_WRDATA_PERR 28 +/* Read data parity error from SB slaves. */ +#define AL_FABRIC_IRQ_SB_SLV_RDATA_PERR 29 +/* Logged read transaction is received */ +#define AL_FABRIC_IRQ_RD_LOG_VALID 30 +/* Reserved, read undefined must write as zeros. */ +#define AL_FABRIC_IRQ_RESERVED_31_31_M0 31 +/* Write logging FIFO has valid transactions */ +#define AL_FABRIC_IRQ_WR_LOG_FIFO_VALID_A0 31 + + +/** + * Get SW interrupt index corresponding to a given sys fabric irq index. 
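+ *
+ * Hedged usage sketch (added for illustration; the group index is an
+ * assumption): to bind a handler to the DDR uncorrectable-ECC summary
+ * cause defined above, assuming it is routed through group 0:
+ *   irq = al_fabric_get_cause_irq(0, AL_FABRIC_IRQ_MCTL_ECC_UNCORR_ERR);
+ *   request_irq(irq, my_ecc_handler, 0, "mctl-ecc", NULL);
+ * where my_ecc_handler is a hypothetical handler following the generic
+ * usecase shown below.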
+ * + * @param idx + * The SYS Fabric IRQ Group index + * @param irq + * The SYS Fabric IRQ index (use AL_FABRIC_IRQ_*) + * + * @returns Software interrupt index + * + * Usecase example - inside your module, get the SW irq using the API, and bind + * it to your handler: + * irq = al_fabric_get_cause_irq(0, AL_FABRIC_IRQ_); + * request_irq(irq, irq_handler, ...); + */ +int al_fabric_get_cause_irq(unsigned int idx, int irq); + +/** + * Check if Hardware Cache-Coherency is enabled or not + * @return 0 if Hardware Cache-Coherency is not enabled and a positive number + * otherwise + */ +int al_fabric_hwcc_enabled(void); + +int al_fabric_init(void); + +#endif /* __AL_FABRIC_H__ */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_common.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_common.h new file mode 100644 index 00000000000000..08ded158c468d4 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_common.h @@ -0,0 +1,68 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @defgroup group_common HAL Common Layer + * Includes all common header files used by HAL + * @{ + * @file al_hal_common.h + * + */ + +#ifndef __AL_HAL_COMMON_H__ +#define __AL_HAL_COMMON_H__ + +#include +#include +#include +#include + +/* Get the maximal value out of two typed values */ +#define al_max_t(type, x, y) ({ \ + type __max1 = (x); \ + type __max2 = (y); \ + __max1 > __max2 ? __max1 : __max2; }) + +/* Get the minimal value out of two typed values */ +#define al_min_t(type, x, y) ({ \ + type __min1 = (x); \ + type __min2 = (y); \ + __min1 < __min2 ? 
__min1 : __min2; }) + +/* Get the number of elements in an array */ +#define AL_ARR_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) + +/** @} end of Common group */ +#endif /* __AL_HAL_COMMON_H__ */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_iofic.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_iofic.h new file mode 100644 index 00000000000000..fad09171e73113 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_iofic.h @@ -0,0 +1,200 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @defgroup group_interrupts Common I/O Fabric Interrupt Controller + * This HAL provides the API for programming the Common I/O Fabric Interrupt + * Controller (IOFIC) found in most of the units attached to the I/O Fabric of + * Alpine platform + * @{ + * @file al_hal_iofic.h + * + * @brief Header file for the interrupt controller that's embedded in various units + * + */ + +#ifndef __AL_HAL_IOFIC_H__ +#define __AL_HAL_IOFIC_H__ + +#include + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +#define AL_IOFIC_MAX_GROUPS 4 + +/* + * Configurations + */ + +/** + * Configure the interrupt controller registers, actual interrupts are still + * masked at this stage. + * + * @param regs_base regs pointer to interrupt controller registers + * @param group the interrupt group. + * @param flags flags of Interrupt Control Register + * + * @return 0 on success. -EINVAL otherwise. + */ +int al_iofic_config(void __iomem *regs_base, int group, + uint32_t flags); + +/** + * configure the moderation timer resolution for a given group + * Applies for both msix and legacy mode. 
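+ *
+ * Worked example (added; numbers follow the resolution formula in the
+ * @param description below): resolution = 15 gives a moderation tick of
+ * (15 + 1) * 0.68 = 10.88 microseconds, so a vector configured with
+ * interval = 10 would then fire at most roughly every 109 microseconds.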
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ * @param resolution resolution of the timer interval; the resolution
+ *        determines the rate of decrementing the interval timer. Setting
+ *        value N means that the interval timer will be decremented every
+ *        (N+1) * 0.68 microseconds.
+ *
+ * @return 0 on success. -EINVAL otherwise.
+ */
+int al_iofic_moder_res_config(void __iomem *regs_base, int group,
+			     uint8_t resolution);
+
+/**
+ * configure the moderation timer interval for a given legacy interrupt group
+ *
+ * @param regs_base regs pointer to unit registers
+ * @param group the interrupt group
+ * @param interval interval between interrupts in resolution units; 0 disables
+ *        moderation
+ *
+ * @return 0 on success. -EINVAL otherwise.
+ */
+int al_iofic_legacy_moder_interval_config(void __iomem *regs_base, int group,
+					 uint8_t interval);
+
+/**
+ * configure the moderation timer interval for a given msix vector
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ * @param vector vector index
+ * @param interval interval between interrupts; 0 disables moderation
+ *
+ * @return 0 on success. -EINVAL otherwise.
+ */
+int al_iofic_msix_moder_interval_config(void __iomem *regs_base, int group,
+				       uint8_t vector, uint8_t interval);
+
+/**
+ * return the offset of the unmask register for a given group.
+ * this function can be used when the upper layer wants to directly
+ * access the unmask register and bypass the al_iofic_unmask() API.
+ *
+ * @param regs_base regs pointer to unit registers
+ * @param group the interrupt group
+ * @return the offset of the unmask register.
+ */
+uint32_t __iomem *al_iofic_unmask_offset_get(void __iomem *regs_base, int group);
+
+/**
+ * unmask specific interrupts for a given group
+ * this function guarantees an atomic operation and is performance optimized,
+ * as it does not require a read-modify-write. The unmask is done using the
+ * interrupt mask clear register, so it is safe to call it while the mask is
+ * being changed by the HW (auto mask) or by another core.
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ * @param mask bitwise of interrupts to unmask; set bits will be unmasked.
+ */
+void al_iofic_unmask(void __iomem *regs_base, int group, uint32_t mask);
+
+/**
+ * mask specific interrupts for a given group
+ * this function modifies the interrupt mask register; the caller must make
+ * sure the mask is not changed by another cpu.
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ * @param mask bitwise of interrupts to mask; set bits will be masked.
+ */
+void al_iofic_mask(void __iomem *regs_base, int group, uint32_t mask);
+
+/**
+ * read the mask register for a given group
+ * this function returns the interrupt mask register
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ */
+uint32_t al_iofic_read_mask(void __iomem *regs_base, int group);
+
+/**
+ * read interrupt cause register for a given group
+ * this will clear the set bits if the Clear on Read mode is enabled.
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ */
+uint32_t al_iofic_read_cause(void __iomem *regs_base, int group);
+
+/**
+ * clear bits in the interrupt cause register for a given group
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ * @param mask bitwise of bits to be cleared; set bits will be cleared.
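+ *
+ * Illustrative call (added commentary): clearing only cause bits 3 and 5
+ * of group 0 while leaving any other latched causes pending:
+ *   al_iofic_clear_cause(regs_base, 0, (1U << 3) | (1U << 5));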
+ */
+void al_iofic_clear_cause(void __iomem *regs_base, int group, uint32_t mask);
+
+/**
+ * set the cause register for a given group
+ * this function sets the cause register and will generate an interrupt
+ * (if the interrupt isn't masked).
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ * @param mask bitwise of bits to be set.
+ */
+void al_iofic_set_cause(void __iomem *regs_base, int group, uint32_t mask);
+
+/**
+ * unmask specific interrupts from aborting the udma for a given group
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ * @param mask bitwise of interrupts to mask
+ */
+void al_iofic_abort_mask(void __iomem *regs_base, int group, uint32_t mask);
+
+#endif
+/** @} end of interrupt controller group */
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_iofic_regs.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_iofic_regs.h
new file mode 100644
index 00000000000000..cb2339ae929019
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_iofic_regs.h
@@ -0,0 +1,122 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __AL_HAL_IOFIC_REG_H
+#define __AL_HAL_IOFIC_REG_H
+
+#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+struct al_iofic_grp_ctrl {
+	uint32_t int_cause_grp;		/* Interrupt Cause RegisterSet by hardware */
+	uint32_t rsrvd1;
+	uint32_t int_cause_set_grp;	/* Interrupt Cause Set RegisterWriting 1 to a bit in t ... */
+	uint32_t rsrvd2;
+	uint32_t int_mask_grp;		/* Interrupt Mask RegisterIf Auto-mask control bit =TR ... */
+	uint32_t rsrvd3;
+	uint32_t int_mask_clear_grp;	/* Interrupt Mask Clear RegisterUsed when auto-mask co ...
*/ + uint32_t rsrvd4; + uint32_t int_status_grp; /* Interrupt status RegisterThis register latch the st ... */ + uint32_t rsrvd5; + uint32_t int_control_grp; /* Interrupt Control Register */ + uint32_t rsrvd6; + uint32_t int_abort_msk_grp; /* Interrupt Mask RegisterEach bit in this register ma ... */ + uint32_t rsrvd7; + uint32_t int_log_msk_grp; /* Interrupt Log RegisterEach bit in this register mas ... */ + uint32_t rsrvd8; +}; + +struct al_iofic_grp_mod { + uint32_t grp_int_mod_reg; /* Interrupt moderation registerDedicated moderation in ... */ + uint32_t rsrvd; +}; + +struct al_iofic_regs { + struct al_iofic_grp_ctrl ctrl[0]; + uint32_t rsrvd1[0x400 >> 2]; + struct al_iofic_grp_mod grp_int_mod[0][32]; +}; + + +/* +* Registers Fields +*/ + + +/**** int_control_grp register ****/ +/* When Clear_on_Read =1, All bits of Cause register ... */ +#define INT_CONTROL_GRP_CLEAR_ON_READ (1 << 0) +/* (must be set only when MSIX is enabled)When Auto-Ma ... */ +#define INT_CONTROL_GRP_AUTO_MASK (1 << 1) +/* Auto_Clear (RW)When Auto-Clear =1, the bits in the ... */ +#define INT_CONTROL_GRP_AUTO_CLEAR (1 << 2) +/* When Set_on_Posedge =1, the bits in the interrupt c ... */ +#define INT_CONTROL_GRP_SET_ON_POSEDGE (1 << 3) +/* When Moderation_Reset =1, all Moderation timers ass ... */ +#define INT_CONTROL_GRP_MOD_RST (1 << 4) +/* When mask_msi_x =1, No MSI-X from this group is sen ... */ +#define INT_CONTROL_GRP_MASK_MSI_X (1 << 5) +/* MSI-X AWID value, same ID for all cause bits */ +#define INT_CONTROL_GRP_AWID_MASK 0x00000F00 +#define INT_CONTROL_GRP_AWID_SHIFT 8 +/* This value determines the interval between interrup ... */ +#define INT_CONTROL_GRP_MOD_INTV_MASK 0x00FF0000 +#define INT_CONTROL_GRP_MOD_INTV_SHIFT 16 +/* This value determines the Moderation_Timer_Clock sp ... */ +#define INT_CONTROL_GRP_MOD_RES_MASK 0x0F000000 +#define INT_CONTROL_GRP_MOD_RES_SHIFT 24 + +/**** grp_int_mod_reg register ****/ +/* Interrupt Moderation Interval registerDedicated reg ... */ +#define INT_MOD_INTV_MASK 0x000000FF +#define INT_MOD_INTV_SHIFT 0 + + +#ifdef __cplusplus +} +#endif + +#endif /* __AL_HAL_IOFIC_REG_H */ + + + + diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_iomap.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_iomap.h new file mode 100644 index 00000000000000..79f9bccd548de7 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_iomap.h @@ -0,0 +1,176 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * This file contains the default I/O mappings for Annapurna Labs + */ + +#ifndef __AL_HAL_IOMAP_H__ +#define __AL_HAL_IOMAP_H__ + +/* Primary Windows */ +#define AL_PCIE_0_BASE 0xe0000000 +#define AL_PCIE_0_SIZE SZ_128M +#define AL_PCIE_1_BASE 0xe8000000 +#define AL_PCIE_1_SIZE SZ_64M +#define AL_PCIE_2_BASE 0xec000000 +#define AL_PCIE_2_SIZE SZ_64M +#define AL_NOR_BASE 0xf4000000 +#define AL_SPI_BASE 0xf8000000 +#define AL_NAND_BASE 0xfa100000 +#define AL_SB_BASE 0xfc000000 +#define AL_SB_SIZE SZ_32M +#define AL_NB_BASE 0xfb000000 +#define AL_NB_SIZE SZ_2M +#define AL_PCIE_0_ECAM_BASE 0xfb600000 +#define AL_PCIE_0_ECAM_SIZE SZ_2M +#define AL_PCIE_1_ECAM_BASE 0xfb800000 +#define AL_PCIE_1_ECAM_SIZE SZ_2M +#define AL_PCIE_2_ECAM_BASE 0xfba00000 +#define AL_PCIE_2_ECAM_SIZE SZ_2M +#define AL_PCIE_INT_ECAM_BASE 0xfbc00000 +#define AL_PCIE_INT_ECAM_SIZE SZ_1M +#define AL_PCIE_INT_BASE 0xfe000000 +#define AL_PCIE_INT_SIZE SZ_16M + +#define AL_MSIX_SPACE_BASE_LOW 0xfbe00000 +#define AL_MSIX_SPACE_BASE_HIGH 0x0 + +#define AL_PBS_INT_MEM_BASE 0xfbff0000 + +#define AL_DRAM_2_HIGH 0x00000001 /* 4GB - 8GB */ +#define AL_DRAM_3_HIGH 0x00000002 /* 3GB - 4GB */ + +/******************/ +/* SB Sub Windows */ +/******************/ +#define AL_SB_PCIE_BASE(idx) (AL_SB_BASE + 0x01800000 + \ + ((idx) * 0x20000)) + +#define AL_SB_PCIE_NUM 3 + +#define AL_SB_RING_BASE (AL_SB_BASE + 0x01860000) + +#define AL_SB_PBS_BASE (AL_SB_BASE + 0x01880000) + +#define AL_SB_SERDES_BASE (AL_SB_BASE + 0x018c0000) + +#define AL_SB_DFX_BASE (AL_SB_BASE + 0x018e0000) + +/******************/ +/* NB Sub Windows */ +/******************/ + +/* NB main / secondary GICs and their Sub Windows*/ +#define AL_NB_GIC_MAIN 0 +#define AL_NB_GIC_SECONDARY 1 + +#define AL_NB_GIC_BASE(id) (AL_NB_BASE + (id)*0x8000) + +#define AL_NB_GIC_DIST_BASE(id) (AL_NB_GIC_BASE(id) + 0x00001000) +#define AL_NB_GIC_CPU_BASE(id) (AL_NB_GIC_BASE(id) + 0x00002000) + +#define AL_NB_IOMMU_BASE(idx) (AL_NB_BASE + 0x30000 + (idx)*0x10000) + +#define AL_NB_IOMMU_NUM 2 + +/* NB service registers */ +#define AL_NB_SERVICE_BASE (AL_NB_BASE + 0x00070000) + +/* DDR Controller */ +#define AL_NB_DDR_CTL_BASE (AL_NB_BASE + 0x00080000) + +/* DDR PHY */ +#define AL_NB_DDR_PHY_BASE (AL_NB_BASE + 0x00088000) + +/* CCI Controller */ +#define AL_NB_CCI_BASE (AL_NB_BASE + 0x00090000) + +/* SB PBS Sub Windows */ +#define AL_I2C_PLD_BASE (AL_SB_PBS_BASE + 0x00000000) +#define AL_SPI_SLAVE_BASE (AL_SB_PBS_BASE + 0x00001000) +#define AL_SPI_MASTER_BASE (AL_SB_PBS_BASE + 0x00002000) + +#define AL_UART_BASE(idx) (AL_SB_PBS_BASE + 0x00003000 \ + + ((idx) * 0x1000)) +#define AL_UART_NUM 4 + +#define AL_GPIO_BASE(idx) (AL_SB_PBS_BASE + 
\ + ((idx != 5) ? 0x00007000 + ((idx) * 0x1000) : 0x17000)) +#define AL_GPIO_NUM 6 + +#define AL_WD_BASE(idx) (AL_SB_PBS_BASE + 0x0000c000 \ + + ((idx) * 0x1000)) +#define AL_WD_NUM 4 + +#define AL_TIMER_BASE(idx, sub_idx) \ + (AL_SB_PBS_BASE + 0x00010000 \ + + ((idx) * 0x1000) + ((sub_idx) * 0x20)) + +#define AL_TIMER_NUM 4 +#define AL_TIMER_SUB_TIMERS_NUM 2 + +#define AL_I2C_GEN_BASE (AL_SB_PBS_BASE + 0x00014000) +#define AL_PBS_UFC_WRAP_BASE (AL_SB_PBS_BASE + 0x00015000) +#define AL_PBS_UFC_CNTL_BASE (AL_SB_PBS_BASE + 0x00015800) +#define AL_PBS_OTP_BASE (AL_SB_PBS_BASE + 0x00016000) +#define AL_PBS_BOOT_ROM_BASE (AL_SB_PBS_BASE + 0x00020000) +#define AL_PBS_SRAM_BASE (AL_SB_PBS_BASE + 0x00024000) +#define AL_PBS_REGFILE_BASE (AL_SB_PBS_BASE + 0x00028000) + +/* SB Ring Sub Windows */ +#define AL_CMOS_NUM_GROUPS 10 + +#define AL_CMOS_GROUP_BASE(idx) (AL_SB_RING_BASE + (idx) * 0x100) + +#define AL_TEMP_SENSOR_BASE (AL_SB_RING_BASE + 0xa00) + +#define AL_PLL_SB 0 +#define AL_PLL_NB 1 +#define AL_PLL_CPU 2 + +#define AL_PLL_BASE(id) (AL_SB_RING_BASE + 0xb00 + (id) * 0x100) + +/* SB DFX Sub Windows */ +#define AL_DFX_CTRL_BASE (AL_SB_DFX_BASE + 0x0) +#define AL_DAP2JTAG_BASE (AL_SB_DFX_BASE + 0x8000) + +/***************************/ +/* PBS int mem sub windows */ +/***************************/ +#define AL_PBS_INT_MEM_BOOT_ROM_BASE (AL_PBS_INT_MEM_BASE + 0x0) +#define AL_PBS_INT_MEM_SRAM_BASE (AL_PBS_INT_MEM_BASE + 0x4000) + +#endif diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_m2m_udma.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_m2m_udma.h new file mode 100644 index 00000000000000..d31e8132753727 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_m2m_udma.h @@ -0,0 +1,147 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_m2m_udma M2M UDMA
+ * @ingroup group_udma_api
+ *  M2M UDMA
+ *  @{
+ * @file   al_hal_m2m_udma.h
+ *
+ * @brief Header file for the HAL driver of a DMA composed of M2S and S2M UDMAs.
+ *
+ * The M2M UDMA is a software concept that defines a DMA consisting of M2S and
+ * S2M UDMAs. This concept is used to share common functionality between
+ * different DMAs that use M2S and S2M UDMAs, as is the case for the RAID and
+ * Crypto Acceleration DMAs.
+ * The M2M UDMA is built on top of the UDMA driver: while the latter manages
+ * either an S2M or an M2S UDMA, the M2M instantiates two UDMA engines and uses
+ * the UDMA driver to manage them and to provide the following functionalities:
+ * - S2M and M2S UDMA initialization.
+ * - S2M and M2S UDMA queues initialization.
+ * - Managing the state of the two UDMAs.
+ * Other functionalities are provided directly by the UDMA driver.
+ */
+
+#ifndef __AL_HAL_M2M_UDMA_H__
+#define __AL_HAL_M2M_UDMA_H__
+
+#include
+#include
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/** M2M UDMA private data structure */
+struct al_m2m_udma {
+	char *name;
+	void __iomem *m2s_regs_base;
+	void __iomem *s2m_regs_base;
+	uint8_t num_of_queues;
+	struct al_udma tx_udma;	/** the m2s component of the M2M UDMA */
+	struct al_udma rx_udma;	/** the s2m component of the M2M UDMA */
+};
+
+/** M2M UDMA parameters from upper layer */
+struct al_m2m_udma_params {
+	void __iomem *m2s_regs_base;
+	void __iomem *s2m_regs_base;
+	char *name; /** the upper layer must keep the string area */
+	uint8_t num_of_queues; /** number of queues */
+	uint8_t max_m2s_descs_per_pkt; /** maximum descriptors per m2s packet */
+	uint8_t max_s2m_descs_per_pkt; /** maximum descriptors per s2m packet */
+};
+
+/**
+ * initialize M2M UDMA
+ *
+ * @param m2m_udma m2m udma handle
+ * @param params m2m udma parameters from upper layer
+ *
+ * @return 0 on success. -EINVAL otherwise.
+ */
+int al_m2m_udma_init(struct al_m2m_udma *m2m_udma,
+		     struct al_m2m_udma_params *params);
+
+/**
+ * initialize the m2s (tx) and s2m (rx) udmas of the queue
+ *
+ * @param m2m_udma m2m udma handle
+ * @param qid queue index
+ * @param tx_params udma queue params for the tx udma queue
+ * @param rx_params udma queue params for the rx udma queue
+ *
+ * @return 0 if no error found.
+ * -EINVAL if the qid is out of range + * -EIO if queue was already initialized + */ +int al_m2m_udma_q_init(struct al_m2m_udma *m2m_udma, uint32_t qid, + struct al_udma_q_params *tx_params, + struct al_udma_q_params *rx_params); +/** + * Change the M2M UDMA state + * + * @param dma m2m udma handle + * @param udma_state the target state + * + * @return 0 + */ +int al_m2m_udma_state_set(struct al_m2m_udma *dma, + enum al_udma_state udma_state); + +/** + * Get udma handle of the tx or rx udma, this handle can be used to call misc + * configuration functions defined at al_udma_config.h + * + * @param m2m_udma m2m udma handle + * @param type tx or rx udma + * @param udma the requested udma handle written to this pointer + * + * @return 0 + */ +int al_m2m_udma_handle_get(struct al_m2m_udma *m2m_udma, + enum al_udma_type type, + struct al_udma **udma); + +/* *INDENT-OFF* */ +#ifdef __cplusplus +} +#endif +/* *INDENT-ON* */ +#endif /* __AL_HAL_M2M_UDMA_H__ */ +/** @} end of M2M UDMA group */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_pbs_regs.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_pbs_regs.h new file mode 100644 index 00000000000000..d65b9662da267e --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_pbs_regs.h @@ -0,0 +1,965 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + + +#ifndef __AL_PBS_REG_H +#define __AL_PBS_REG_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif +/* +* Unit Registers +*/ + + + +struct al_pbs_unit { + uint32_t conf_bus; /* conf_bus, configuration of ... 
*/ + uint32_t dram_0_nb_bar_high; /* PASW high */ + uint32_t dram_0_nb_bar_low; /* PASW low */ + uint32_t dram_1_nb_bar_high; /* PASW high */ + uint32_t dram_1_nb_bar_low; /* PASW low */ + uint32_t dram_2_nb_bar_high; /* PASW high */ + uint32_t dram_2_nb_bar_low; /* PASW low */ + uint32_t dram_3_nb_bar_high; /* PASW high */ + uint32_t dram_3_nb_bar_low; /* PASW low */ + uint32_t msix_nb_bar_high; /* PASW high */ + uint32_t msix_nb_bar_low; /* PASW low */ + uint32_t dram_0_sb_bar_high; /* PASW high */ + uint32_t dram_0_sb_bar_low; /* PASW low */ + uint32_t dram_1_sb_bar_high; /* PASW high */ + uint32_t dram_1_sb_bar_low; /* PASW low */ + uint32_t dram_2_sb_bar_high; /* PASW high */ + uint32_t dram_2_sb_bar_low; /* PASW low */ + uint32_t dram_3_sb_bar_high; /* PASW high */ + uint32_t dram_3_sb_bar_low; /* PASW low */ + uint32_t msix_sb_bar_high; /* PASW high */ + uint32_t msix_sb_bar_low; /* PASW low */ + uint32_t pcie_mem0_bar_high; /* PASW high */ + uint32_t pcie_mem0_bar_low; /* PASW low */ + uint32_t pcie_mem1_bar_high; /* PASW high */ + uint32_t pcie_mem1_bar_low; /* PASW low */ + uint32_t pcie_mem2_bar_high; /* PASW high */ + uint32_t pcie_mem2_bar_low; /* PASW low */ + uint32_t pcie_ext_ecam0_bar_high; /* PASW high */ + uint32_t pcie_ext_ecam0_bar_low; /* PASW low */ + uint32_t pcie_ext_ecam1_bar_high; /* PASW high */ + uint32_t pcie_ext_ecam1_bar_low; /* PASW low */ + uint32_t pcie_ext_ecam2_bar_high; /* PASW high */ + uint32_t pcie_ext_ecam2_bar_low; /* PASW low */ + uint32_t pbs_nor_bar_high; /* PASW high */ + uint32_t pbs_nor_bar_low; /* PASW low */ + uint32_t pbs_spi_bar_high; /* PASW high */ + uint32_t pbs_spi_bar_low; /* PASW low */ + uint32_t rsrvd_0[3]; + uint32_t pbs_nand_bar_high; /* PASW high */ + uint32_t pbs_nand_bar_low; /* PASW low */ + uint32_t pbs_int_mem_bar_high; /* PASW high */ + uint32_t pbs_int_mem_bar_low; /* PASW low */ + uint32_t pbs_boot_bar_high; /* PASW high */ + uint32_t pbs_boot_bar_low; /* PASW low */ + uint32_t nb_int_bar_high; /* PASW high */ + uint32_t nb_int_bar_low; /* PASW low */ + uint32_t nb_stm_bar_high; /* PASW high */ + uint32_t nb_stm_bar_low; /* PASW low */ + uint32_t pcie_ecam_int_bar_high; /* PASW high */ + uint32_t pcie_ecam_int_bar_low; /* PASW low */ + uint32_t pcie_mem_int_bar_high; /* PASW high */ + uint32_t pcie_mem_int_bar_low; /* PASW low */ + uint32_t winit_cntl; /* control */ + uint32_t latch_bars; /* control */ + uint32_t pcie_conf_0; /* control */ + uint32_t pcie_conf_1; /* control */ + uint32_t serdes_mux_pipe; /* control */ + uint32_t dma_io_master_map; /* control */ + uint32_t i2c_pld_status_high; /* status */ + uint32_t i2c_pld_status_low; /* status */ + uint32_t spi_dbg_status_high; /* status */ + uint32_t spi_dbg_status_low; /* status */ + uint32_t spi_mst_status_high; /* status */ + uint32_t spi_mst_status_low; /* status */ + uint32_t mem_pbs_parity_err_high; /* log */ + uint32_t mem_pbs_parity_err_low; /* log */ + uint32_t boot_strap; /* log */ + uint32_t cfg_axi_conf_0; /* conf */ + uint32_t cfg_axi_conf_1; /* conf */ + uint32_t cfg_axi_conf_2; /* conf */ + uint32_t cfg_axi_conf_3; /* conf */ + uint32_t spi_mst_conf_0; /* conf */ + uint32_t spi_mst_conf_1; /* conf */ + uint32_t spi_slv_conf_0; /* conf */ + uint32_t apb_mem_conf_int; /* conf */ + uint32_t sb2nb_cfg_dram_remap; /* PASW remap register */ + uint32_t pbs_mux_sel_0; /* control */ + uint32_t pbs_mux_sel_1; /* control */ + uint32_t pbs_mux_sel_2; /* control */ + uint32_t pbs_mux_conf; /* control */ + uint32_t sb_int_bar_high; /* PASW high */ + uint32_t 
sb_int_bar_low; /* PASW low */ + uint32_t ufc_pbs_parity_err_high; /* log */ + uint32_t ufc_pbs_parity_err_low; /* log */ + uint32_t gen_conf; /* cntl */ + uint32_t cpu_debug; /* cntl */ + uint32_t uart0_debug; /* status */ + uint32_t uart1_debug; /* status */ + uint32_t uart2_debug; /* status */ + uint32_t uart3_debug; /* status */ + uint32_t uart0_conf_status; /* cntl */ + uint32_t uart1_conf_status; /* cntl */ + uint32_t uart2_conf_status; /* cntl */ + uint32_t uart3_conf_status; /* cntl */ + uint32_t gpio0_conf_status; /* cntl */ + uint32_t gpio1_conf_status; /* cntl */ + uint32_t gpio2_conf_status; /* cntl */ + uint32_t gpio3_conf_status; /* cntl */ + uint32_t gpio4_conf_status; /* cntl */ + uint32_t i2c_gen_conf_status; /* cntl */ + uint32_t i2c_gen_debug; /* cntl */ + uint32_t watch_dog_reset_out; /* cntl */ + uint32_t otp_magic_num; /* cntl */ + uint32_t otp_cntl; /* cntl */ + uint32_t otp_cfg_0; /* cfg */ + uint32_t otp_cfg_1; /* cfg */ + uint32_t otp_cfg_3; /* cfg */ + uint32_t cfg_nand_0; /* cfg */ + uint32_t cfg_nand_1; /* cfg */ + uint32_t cfg_nand_2; /* cfg */ + uint32_t cfg_nand_3; /* cfg */ + uint32_t nb_nic_regs_bar_high; /* PASW high */ + uint32_t nb_nic_regs_bar_low; /* PASW low */ + uint32_t sb_nic_regs_bar_high; /* PASW high */ + uint32_t sb_nic_regs_bar_low; /* PASW low */ + uint32_t serdes_mux_multi_0; /* control */ + uint32_t serdes_mux_multi_1; /* control */ + uint32_t pbs_ulpi_mux_conf; /* control */ + uint32_t wr_once_dbg_dis_ovrd_reg; /* cntl */ + uint32_t gpio5_conf_status; /* cntl */ + uint32_t rsrvd[6]; +}; + +struct al_pbs_regs { + struct al_pbs_unit unit; +}; + + +/* +* Registers Fields +*/ + + +/**** conf_bus register ****/ +/* read slave error enable */ +#define PBS_UNIT_CONF_BUS_RD_SLVERR_EN (1 << 0) +/* write slave error enable */ +#define PBS_UNIT_CONF_BUS_WR_SLVERR_EN (1 << 1) +/* read decode error enable */ +#define PBS_UNIT_CONF_BUS_RD_DECERR_EN (1 << 2) +/* write decode error enable */ +#define PBS_UNIT_CONF_BUS_WR_DECERR_EN (1 << 3) +/* for debug clear the apb SM */ +#define PBS_UNIT_CONF_BUS_CLR_APB_FSM (1 << 4) +/* for debug clear the WFIFO */ +#define PBS_UNIT_CONF_BUS_CLR_WFIFO_CLEAR (1 << 5) +/* Arbiter between read and write channel */ +#define PBS_UNIT_CONF_BUS_WRR_CNT_MASK 0x000001C0 +#define PBS_UNIT_CONF_BUS_WRR_CNT_SHIFT 6 + +/**** dram_0_nb_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_DRAM_0_NB_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_DRAM_0_NB_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_DRAM_0_NB_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_DRAM_0_NB_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_DRAM_0_NB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_DRAM_0_NB_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** dram_1_nb_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_DRAM_1_NB_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_DRAM_1_NB_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_DRAM_1_NB_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_DRAM_1_NB_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_DRAM_1_NB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_DRAM_1_NB_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** dram_2_nb_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... 
*/ +#define PBS_UNIT_DRAM_2_NB_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_DRAM_2_NB_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_DRAM_2_NB_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_DRAM_2_NB_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_DRAM_2_NB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_DRAM_2_NB_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** dram_3_nb_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_DRAM_3_NB_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_DRAM_3_NB_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_DRAM_3_NB_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_DRAM_3_NB_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_DRAM_3_NB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_DRAM_3_NB_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** msix_nb_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_MSIX_NB_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_MSIX_NB_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_MSIX_NB_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_MSIX_NB_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_MSIX_NB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_MSIX_NB_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** dram_0_sb_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_DRAM_0_SB_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_DRAM_0_SB_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_DRAM_0_SB_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_DRAM_0_SB_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_DRAM_0_SB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_DRAM_0_SB_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** dram_1_sb_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_DRAM_1_SB_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_DRAM_1_SB_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_DRAM_1_SB_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_DRAM_1_SB_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_DRAM_1_SB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_DRAM_1_SB_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** dram_2_sb_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_DRAM_2_SB_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_DRAM_2_SB_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_DRAM_2_SB_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_DRAM_2_SB_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_DRAM_2_SB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_DRAM_2_SB_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** dram_3_sb_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_DRAM_3_SB_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_DRAM_3_SB_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_DRAM_3_SB_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_DRAM_3_SB_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_DRAM_3_SB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_DRAM_3_SB_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** msix_sb_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... 
*/ +#define PBS_UNIT_MSIX_SB_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_MSIX_SB_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_MSIX_SB_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_MSIX_SB_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_MSIX_SB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_MSIX_SB_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** pcie_mem0_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_PCIE_MEM0_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_PCIE_MEM0_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_PCIE_MEM0_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_PCIE_MEM0_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_PCIE_MEM0_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_PCIE_MEM0_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** pcie_mem1_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_PCIE_MEM1_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_PCIE_MEM1_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_PCIE_MEM1_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_PCIE_MEM1_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_PCIE_MEM1_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_PCIE_MEM1_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** pcie_mem2_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_PCIE_MEM2_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_PCIE_MEM2_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_PCIE_MEM2_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_PCIE_MEM2_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_PCIE_MEM2_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_PCIE_MEM2_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** pcie_ext_ecam0_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_PCIE_EXT_ECAM0_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_PCIE_EXT_ECAM0_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_PCIE_EXT_ECAM0_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_PCIE_EXT_ECAM0_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_PCIE_EXT_ECAM0_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_PCIE_EXT_ECAM0_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** pcie_ext_ecam1_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_PCIE_EXT_ECAM1_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_PCIE_EXT_ECAM1_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_PCIE_EXT_ECAM1_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_PCIE_EXT_ECAM1_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_PCIE_EXT_ECAM1_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_PCIE_EXT_ECAM1_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** pcie_ext_ecam2_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_PCIE_EXT_ECAM2_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_PCIE_EXT_ECAM2_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_PCIE_EXT_ECAM2_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_PCIE_EXT_ECAM2_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_PCIE_EXT_ECAM2_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_PCIE_EXT_ECAM2_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** pbs_nor_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... 
*/ +#define PBS_UNIT_PBS_NOR_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_PBS_NOR_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_PBS_NOR_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_PBS_NOR_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_PBS_NOR_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_PBS_NOR_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** pbs_spi_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_PBS_SPI_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_PBS_SPI_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_PBS_SPI_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_PBS_SPI_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_PBS_SPI_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_PBS_SPI_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** pbs_nand_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_PBS_NAND_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_PBS_NAND_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_PBS_NAND_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_PBS_NAND_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_PBS_NAND_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_PBS_NAND_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** pbs_int_mem_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_PBS_INT_MEM_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_PBS_INT_MEM_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_PBS_INT_MEM_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_PBS_INT_MEM_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_PBS_INT_MEM_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_PBS_INT_MEM_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** pbs_boot_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_PBS_BOOT_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_PBS_BOOT_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_PBS_BOOT_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_PBS_BOOT_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_PBS_BOOT_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_PBS_BOOT_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** nb_int_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_NB_INT_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_NB_INT_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_NB_INT_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_NB_INT_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_NB_INT_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_NB_INT_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** nb_stm_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_NB_STM_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_NB_STM_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_NB_STM_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_NB_STM_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_NB_STM_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_NB_STM_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** pcie_ecam_int_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... 
*/ +#define PBS_UNIT_PCIE_ECAM_INT_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_PCIE_ECAM_INT_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_PCIE_ECAM_INT_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_PCIE_ECAM_INT_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_PCIE_ECAM_INT_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_PCIE_ECAM_INT_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** pcie_mem_int_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_PCIE_MEM_INT_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_PCIE_MEM_INT_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_PCIE_MEM_INT_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_PCIE_MEM_INT_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_PCIE_MEM_INT_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_PCIE_MEM_INT_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** winit_cntl register ****/ +/* When set enable accsess to winit regs, in normal mode. */ +#define PBS_UNIT_WINIT_CNTL_ENABLE_WINIT_REGS_ACCESS (1 << 0) +/* Rsrvd */ +#define PBS_UNIT_WINIT_CNTL_RSRVD_MASK 0xFFFFFFFE +#define PBS_UNIT_WINIT_CNTL_RSRVD_SHIFT 1 + +/**** latch_bars register ****/ +/* The SW should clear this bit before any bar update, and reset ... */ +#define PBS_UNIT_LATCH_BARS_ENABLE (1 << 0) +/* Rsrvd */ +#define PBS_UNIT_LATCH_BARS_RSRVD_MASK 0xFFFFFFFE +#define PBS_UNIT_LATCH_BARS_RSRVD_SHIFT 1 + +/**** pcie_conf_0 register ****/ +/* NOT_use, config internal inside each PCIe core */ +#define PBS_UNIT_PCIE_CONF_0_DEVS_TYPE_MASK 0x00000FFF +#define PBS_UNIT_PCIE_CONF_0_DEVS_TYPE_SHIFT 0 +/* sys_aux_det value */ +#define PBS_UNIT_PCIE_CONF_0_SYS_AUX_PWR_DET_VEC_MASK 0x00007000 +#define PBS_UNIT_PCIE_CONF_0_SYS_AUX_PWR_DET_VEC_SHIFT 12 +/* Rsrvd */ +#define PBS_UNIT_PCIE_CONF_0_RSRVD_MASK 0xFFFF8000 +#define PBS_UNIT_PCIE_CONF_0_RSRVD_SHIFT 15 + +/**** pcie_conf_1 register ****/ +/* which pcie exist, the PCIe device will be under reset untill ... */ +#define PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_MASK 0x00000007 +#define PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT 0 +/* Rsrvd */ +#define PBS_UNIT_PCIE_CONF_1_RSRVD_MASK 0xFFFFFFF8 +#define PBS_UNIT_PCIE_CONF_1_RSRVD_SHIFT 3 + +/**** serdes_mux_pipe register ****/ +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_SERDES_2_MASK 0x00000007 +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_SERDES_2_SHIFT 0 +/* Rsrvd */ +#define PBS_UNIT_SERDES_MUX_PIPE_RSRVD_3 (1 << 3) +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_SERDES_3_MASK 0x00000070 +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_SERDES_3_SHIFT 4 +/* Rsrvd */ +#define PBS_UNIT_SERDES_MUX_PIPE_RSRVD_7 (1 << 7) +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_B_0_MASK 0x00000300 +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_B_0_SHIFT 8 +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_B_1_MASK 0x00000C00 +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_B_1_SHIFT 10 +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_C_0_MASK 0x00003000 +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_C_0_SHIFT 12 +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_C_1_MASK 0x0000C000 +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_C_1_SHIFT 14 +/* serdes one hot mux control. 
*/ +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_USB_A_0_MASK 0x00030000 +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_USB_A_0_SHIFT 16 +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_USB_B_0_MASK 0x000C0000 +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_USB_B_0_SHIFT 18 +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_CLKI_SER_2_MASK 0x00300000 +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_CLKI_SER_2_SHIFT 20 +/* Rsrvd */ +#define PBS_UNIT_SERDES_MUX_PIPE_RSRVD_23_22_MASK 0x00C00000 +#define PBS_UNIT_SERDES_MUX_PIPE_RSRVD_23_22_SHIFT 22 +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_CLKI_SER_3_MASK 0x07000000 +#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_CLKI_SER_3_SHIFT 24 +/* Rsrvd */ +#define PBS_UNIT_SERDES_MUX_PIPE_RSRVD_MASK 0xF8000000 +#define PBS_UNIT_SERDES_MUX_PIPE_RSRVD_SHIFT 27 + +/**** dma_io_master_map register ****/ +/* when set map all the dma_io transaction to the dram, regardle ... */ +#define PBS_UNIT_DMA_IO_MASTER_MAP_CNTL (1 << 0) +/* Rsrvd */ +#define PBS_UNIT_DMA_IO_MASTER_MAP_RSRVD_MASK 0xFFFFFFFE +#define PBS_UNIT_DMA_IO_MASTER_MAP_RSRVD_SHIFT 1 + +/**** i2c_pld_status_high register ****/ +/* i2c pre load status */ +#define PBS_UNIT_I2C_PLD_STATUS_HIGH_STATUS_MASK 0x000000FF +#define PBS_UNIT_I2C_PLD_STATUS_HIGH_STATUS_SHIFT 0 + +/**** spi_dbg_status_high register ****/ +/* spi dbg load status */ +#define PBS_UNIT_SPI_DBG_STATUS_HIGH_STATUS_MASK 0x000000FF +#define PBS_UNIT_SPI_DBG_STATUS_HIGH_STATUS_SHIFT 0 + +/**** spi_mst_status_high register ****/ +/* sp imst load status */ +#define PBS_UNIT_SPI_MST_STATUS_HIGH_STATUS_MASK 0x000000FF +#define PBS_UNIT_SPI_MST_STATUS_HIGH_STATUS_SHIFT 0 + +/**** mem_pbs_parity_err_high register ****/ +/* address latch in case of error */ +#define PBS_UNIT_MEM_PBS_PARITY_ERR_HIGH_ADDR_MASK 0x000000FF +#define PBS_UNIT_MEM_PBS_PARITY_ERR_HIGH_ADDR_SHIFT 0 + +/**** cfg_axi_conf_0 register ****/ +/* value */ +#define PBS_UNIT_CFG_AXI_CONF_0_DBG_RD_ID_MASK 0x0000007F +#define PBS_UNIT_CFG_AXI_CONF_0_DBG_RD_ID_SHIFT 0 +/* value */ +#define PBS_UNIT_CFG_AXI_CONF_0_DBG_WR_ID_MASK 0x00003F80 +#define PBS_UNIT_CFG_AXI_CONF_0_DBG_WR_ID_SHIFT 7 +/* value */ +#define PBS_UNIT_CFG_AXI_CONF_0_PLD_WR_ID_MASK 0x001FC000 +#define PBS_UNIT_CFG_AXI_CONF_0_PLD_WR_ID_SHIFT 14 +/* value */ +#define PBS_UNIT_CFG_AXI_CONF_0_DBG_AWCACHE_MASK 0x01E00000 +#define PBS_UNIT_CFG_AXI_CONF_0_DBG_AWCACHE_SHIFT 21 +/* value */ +#define PBS_UNIT_CFG_AXI_CONF_0_DBG_ARCACHE_MASK 0x1E000000 +#define PBS_UNIT_CFG_AXI_CONF_0_DBG_ARCACHE_SHIFT 25 +/* value */ +#define PBS_UNIT_CFG_AXI_CONF_0_DBG_AXPROT_MASK 0xE0000000 +#define PBS_UNIT_CFG_AXI_CONF_0_DBG_AXPROT_SHIFT 29 + +/**** cfg_axi_conf_1 register ****/ +/* value */ +#define PBS_UNIT_CFG_AXI_CONF_1_DBG_ARUSER_MASK 0x03FFFFFF +#define PBS_UNIT_CFG_AXI_CONF_1_DBG_ARUSER_SHIFT 0 +/* value */ +#define PBS_UNIT_CFG_AXI_CONF_1_DBG_ARQOS_MASK 0x3C000000 +#define PBS_UNIT_CFG_AXI_CONF_1_DBG_ARQOS_SHIFT 26 + +/**** cfg_axi_conf_2 register ****/ +/* value */ +#define PBS_UNIT_CFG_AXI_CONF_2_DBG_AWUSER_MASK 0x03FFFFFF +#define PBS_UNIT_CFG_AXI_CONF_2_DBG_AWUSER_SHIFT 0 +/* value */ +#define PBS_UNIT_CFG_AXI_CONF_2_DBG_AWQOS_MASK 0x3C000000 +#define PBS_UNIT_CFG_AXI_CONF_2_DBG_AWQOS_SHIFT 26 + +/**** spi_mst_conf_0 register ****/ +/* value */ +#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_SRL (1 << 0) +/* value */ +#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_SCPOL (1 << 1) +/* value */ +#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_SCPH (1 << 2) +/* 
value */ +#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_SER_MASK 0x00000078 +#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_SER_SHIFT 3 +/* value */ +#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_BAUD_MASK 0x007FFF80 +#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_BAUD_SHIFT 7 +/* value */ +#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_RD_CMD_MASK 0x7F800000 +#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_RD_CMD_SHIFT 23 + +/**** spi_mst_conf_1 register ****/ +/* value */ +#define PBS_UNIT_SPI_MST_CONF_1_CFG_SPI_MST_WR_CMD_MASK 0x000000FF +#define PBS_UNIT_SPI_MST_CONF_1_CFG_SPI_MST_WR_CMD_SHIFT 0 +/* value */ +#define PBS_UNIT_SPI_MST_CONF_1_CFG_SPI_MST_ADDR_BYTES_NUM_MASK 0x00000700 +#define PBS_UNIT_SPI_MST_CONF_1_CFG_SPI_MST_ADDR_BYTES_NUM_SHIFT 8 +/* value */ +#define PBS_UNIT_SPI_MST_CONF_1_CFG_SPI_MST_TMODE_MASK 0x00001800 +#define PBS_UNIT_SPI_MST_CONF_1_CFG_SPI_MST_TMODE_SHIFT 11 +/* value */ +#define PBS_UNIT_SPI_MST_CONF_1_CFG_SPI_MST_FAST_RD (1 << 13) + +/**** spi_slv_conf_0 register ****/ +/* value */ +#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_BAUD_MASK 0x0000FFFF +#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_BAUD_SHIFT 0 +/* value. The reset va;ue is according to boot strap */ +#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_SCPOL (1 << 16) +/* value. The reset va;ue is according to boot strap */ +#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_SCPH (1 << 17) +/* value */ +#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_SER_MASK 0x03FC0000 +#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_SER_SHIFT 18 +/* value */ +#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_SRL (1 << 26) +/* value */ +#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_TMODE_MASK 0x18000000 +#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_TMODE_SHIFT 27 + +/**** apb_mem_conf_int register ****/ +/* value */ +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_PBS_WRR_CNT_MASK 0x00000007 +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_PBS_WRR_CNT_SHIFT 0 +/* value */ +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_I2C_PLD_APB_MIX_ARB (1 << 3) +/* value */ +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_SPI_DBG_APB_MIX_ARB (1 << 4) +/* value */ +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_SPI_MST_APB_MIX_ARB (1 << 5) +/* value */ +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_I2C_PLD_CLEAR_FSM (1 << 6) +/* value */ +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_SPI_DBG_CLEAR_FSM (1 << 7) +/* value */ +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_SPI_MST_CLEAR_FSM (1 << 8) +/* value */ +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_PBS_AXI_FSM_CLEAR (1 << 9) +/* value */ +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_PBS_AXI_FIFOS_CLEAR (1 << 10) +/* value */ +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_BOOTROM_PARITY_EN (1 << 11) +/* value */ +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_RD_SLV_ERR_EN (1 << 12) +/* value */ +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_RD_DEC_ERR_EN (1 << 13) +/* value */ +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_WR_SLV_ERR_EN (1 << 14) +/* value */ +#define PBS_UNIT_APB_MEM_CONF_INT_CFG_WR_DEC_ERR_EN (1 << 15) + +/**** sb_int_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... 
*/ +#define PBS_UNIT_SB_INT_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_SB_INT_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reseved fiels */ +#define PBS_UNIT_SB_INT_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_SB_INT_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_SB_INT_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_SB_INT_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** ufc_pbs_parity_err_high register ****/ +/* address latch in case of error */ +#define PBS_UNIT_UFC_PBS_PARITY_ERR_HIGH_ADDR_MASK 0x000000FF +#define PBS_UNIT_UFC_PBS_PARITY_ERR_HIGH_ADDR_SHIFT 0 + +/**** uart0_conf_status register ****/ +/* Conf:// [0] -- DSR_N RW bit// [1] -- DCD_N RW bit// [2] -- RI ... */ +#define PBS_UNIT_UART0_CONF_STATUS_CONF_MASK 0x0000FFFF +#define PBS_UNIT_UART0_CONF_STATUS_CONF_SHIFT 0 +/* Status:// [16] -- dtr_n RO bit// [17] -- OUT1_N RO bit// [18] ... */ +#define PBS_UNIT_UART0_CONF_STATUS_STATUS_MASK 0xFFFF0000 +#define PBS_UNIT_UART0_CONF_STATUS_STATUS_SHIFT 16 + +/**** uart1_conf_status register ****/ +/* Conf: // [0] -- DSR_N RW bit // [1] -- DCD_N RW bit // [2] -- ... */ +#define PBS_UNIT_UART1_CONF_STATUS_CONF_MASK 0x0000FFFF +#define PBS_UNIT_UART1_CONF_STATUS_CONF_SHIFT 0 +/* Status: // [16] -- dtr_n RO bit // [17] -- OUT1_N RO bit // [ ... */ +#define PBS_UNIT_UART1_CONF_STATUS_STATUS_MASK 0xFFFF0000 +#define PBS_UNIT_UART1_CONF_STATUS_STATUS_SHIFT 16 + +/**** uart2_conf_status register ****/ +/* Conf: // [0] -- DSR_N RW bit // [1] -- DCD_N RW bit // [2] -- ... */ +#define PBS_UNIT_UART2_CONF_STATUS_CONF_MASK 0x0000FFFF +#define PBS_UNIT_UART2_CONF_STATUS_CONF_SHIFT 0 +/* Status: // [16] -- dtr_n RO bit // [17] -- OUT1_N RO bit // [ ... */ +#define PBS_UNIT_UART2_CONF_STATUS_STATUS_MASK 0xFFFF0000 +#define PBS_UNIT_UART2_CONF_STATUS_STATUS_SHIFT 16 + +/**** uart3_conf_status register ****/ +/* Conf: // [0] -- DSR_N RW bit // [1] -- DCD_N RW bit // [2] -- ... */ +#define PBS_UNIT_UART3_CONF_STATUS_CONF_MASK 0x0000FFFF +#define PBS_UNIT_UART3_CONF_STATUS_CONF_SHIFT 0 +/* Status: // [16] -- dtr_n RO bit // [17] -- OUT1_N RO bit // [ ... */ +#define PBS_UNIT_UART3_CONF_STATUS_STATUS_MASK 0xFFFF0000 +#define PBS_UNIT_UART3_CONF_STATUS_STATUS_SHIFT 16 + +/**** gpio0_conf_status register ****/ +/* Cntl:// [7:0] nGPAFEN; // from regfile// [15:8 ... */ +#define PBS_UNIT_GPIO0_CONF_STATUS_CONF_MASK 0x0000FFFF +#define PBS_UNIT_GPIO0_CONF_STATUS_CONF_SHIFT 0 +/* staus: +// [24:16] GPAFIN; // to regfile */ +#define PBS_UNIT_GPIO0_CONF_STATUS_STATUS_MASK 0xFFFF0000 +#define PBS_UNIT_GPIO0_CONF_STATUS_STATUS_SHIFT 16 + +/**** gpio1_conf_status register ****/ +/* Cntl:// [7:0] nGPAFEN; // from regfile// [15:8 ... */ +#define PBS_UNIT_GPIO1_CONF_STATUS_CONF_MASK 0x0000FFFF +#define PBS_UNIT_GPIO1_CONF_STATUS_CONF_SHIFT 0 +/* staus: +// [24:16] GPAFIN; // to regfile */ +#define PBS_UNIT_GPIO1_CONF_STATUS_STATUS_MASK 0xFFFF0000 +#define PBS_UNIT_GPIO1_CONF_STATUS_STATUS_SHIFT 16 + +/**** gpio2_conf_status register ****/ +/* Cntl:// [7:0] nGPAFEN; // from regfile// [15:8 ... */ +#define PBS_UNIT_GPIO2_CONF_STATUS_CONF_MASK 0x0000FFFF +#define PBS_UNIT_GPIO2_CONF_STATUS_CONF_SHIFT 0 +/* staus: +// [24:16] GPAFIN; // to regfile */ +#define PBS_UNIT_GPIO2_CONF_STATUS_STATUS_MASK 0xFFFF0000 +#define PBS_UNIT_GPIO2_CONF_STATUS_STATUS_SHIFT 16 + +/**** gpio3_conf_status register ****/ +/* Cntl:// [7:0] nGPAFEN; // from regfile// [15:8 ... 
*/ +#define PBS_UNIT_GPIO3_CONF_STATUS_CONF_MASK 0x0000FFFF +#define PBS_UNIT_GPIO3_CONF_STATUS_CONF_SHIFT 0 +/* staus: +// [24:16] GPAFIN; // to regfile */ +#define PBS_UNIT_GPIO3_CONF_STATUS_STATUS_MASK 0xFFFF0000 +#define PBS_UNIT_GPIO3_CONF_STATUS_STATUS_SHIFT 16 + +/**** gpio4_conf_status register ****/ +/* Cntl:// [7:0] nGPAFEN; // from regfile// [15:8 ... */ +#define PBS_UNIT_GPIO4_CONF_STATUS_CONF_MASK 0x0000FFFF +#define PBS_UNIT_GPIO4_CONF_STATUS_CONF_SHIFT 0 +/* staus: +// [24:16] GPAFIN; // to regfile */ +#define PBS_UNIT_GPIO4_CONF_STATUS_STATUS_MASK 0xFFFF0000 +#define PBS_UNIT_GPIO4_CONF_STATUS_STATUS_SHIFT 16 + +/**** i2c_gen_conf_status register ****/ +/* cntl +// [0] -- dma_tx_ack +// [1] -- dma_rx_ack */ +#define PBS_UNIT_I2C_GEN_CONF_STATUS_CONF_MASK 0x0000FFFF +#define PBS_UNIT_I2C_GEN_CONF_STATUS_CONF_SHIFT 0 +/* Status// [16] – dma_tx_req RO bit// [17] -- dma_tx_single RO ... */ +#define PBS_UNIT_I2C_GEN_CONF_STATUS_STATUS_MASK 0xFFFF0000 +#define PBS_UNIT_I2C_GEN_CONF_STATUS_STATUS_SHIFT 16 + +/**** watch_dog_reset_out register ****/ +/* [0] if set to 1'b1, WD0 can not generate reset_out_n[1] if se ... */ +#define PBS_UNIT_WATCH_DOG_RESET_OUT_DISABLE_MASK 0x0000000F +#define PBS_UNIT_WATCH_DOG_RESET_OUT_DISABLE_SHIFT 0 + +/**** otp_cntl register ****/ +/* from reg file Config To bypass the copy from OTPW to OTPR */ +#define PBS_UNIT_OTP_CNTL_IGNORE_OTPW (1 << 0) +/* Not use comes from bond. */ +#define PBS_UNIT_OTP_CNTL_IGNORE_PRELOAD (1 << 1) +/* margin read from the fuse box */ +#define PBS_UNIT_OTP_CNTL_OTPW_MARGIN_READ (1 << 2) +/* Indicate when OTP busy */ +#define PBS_UNIT_OTP_CNTL_OTP_BUSY (1 << 3) + +/**** otp_cfg_0 register ****/ +/* cfg to to OTP cntl. */ +#define PBS_UNIT_OTP_CFG_0_CFG_OTPW_PWRDN_CNT_MASK 0x0000FFFF +#define PBS_UNIT_OTP_CFG_0_CFG_OTPW_PWRDN_CNT_SHIFT 0 +/* cfg to to OTP cntl. */ +#define PBS_UNIT_OTP_CFG_0_CFG_OTPW_READ_CNT_MASK 0xFFFF0000 +#define PBS_UNIT_OTP_CFG_0_CFG_OTPW_READ_CNT_SHIFT 16 + +/**** otp_cfg_1 register ****/ +/* cfg to to OTP cntl. */ +#define PBS_UNIT_OTP_CFG_1_CFG_OTPW_PGM_CNT_MASK 0x0000FFFF +#define PBS_UNIT_OTP_CFG_1_CFG_OTPW_PGM_CNT_SHIFT 0 +/* cfg to to OTP cntl. */ +#define PBS_UNIT_OTP_CFG_1_CFG_OTPW_PREP_CNT_MASK 0xFFFF0000 +#define PBS_UNIT_OTP_CFG_1_CFG_OTPW_PREP_CNT_SHIFT 16 + +/**** otp_cfg_3 register ****/ +/* cfg to to OTP cntl. */ +#define PBS_UNIT_OTP_CFG_3_CFG_OTPW_PS18_CNT_MASK 0x0000FFFF +#define PBS_UNIT_OTP_CFG_3_CFG_OTPW_PS18_CNT_SHIFT 0 +/* cfg to to OTP cntl. */ +#define PBS_UNIT_OTP_CFG_3_CFG_OTPW_PWRUP_CNT_MASK 0xFFFF0000 +#define PBS_UNIT_OTP_CFG_3_CFG_OTPW_PWRUP_CNT_SHIFT 16 + +/**** nb_nic_regs_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... */ +#define PBS_UNIT_NB_NIC_REGS_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_NB_NIC_REGS_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reserved fields */ +#define PBS_UNIT_NB_NIC_REGS_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_NB_NIC_REGS_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_NB_NIC_REGS_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_NB_NIC_REGS_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** sb_nic_regs_bar_low register ****/ +/* window size = 2 ^ (15 + win_size), zero value disable the win ... 
*/ +#define PBS_UNIT_SB_NIC_REGS_BAR_LOW_WIN_SIZE_MASK 0x0000003F +#define PBS_UNIT_SB_NIC_REGS_BAR_LOW_WIN_SIZE_SHIFT 0 +/* reserved fields */ +#define PBS_UNIT_SB_NIC_REGS_BAR_LOW_RSRVD_MASK 0x0000FFC0 +#define PBS_UNIT_SB_NIC_REGS_BAR_LOW_RSRVD_SHIFT 6 +/* Rsrvd */ +#define PBS_UNIT_SB_NIC_REGS_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000 +#define PBS_UNIT_SB_NIC_REGS_BAR_LOW_ADDR_HIGH_SHIFT 16 + +/**** serdes_mux_multi_0 register ****/ +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_8_MASK 0x00000007 +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_8_SHIFT 0 +/* Rsrvd */ +#define PBS_UNIT_SERDES_MUX_MULTI_0_RSRVD_3 (1 << 3) +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_9_MASK 0x00000070 +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_9_SHIFT 4 +/* Rsrvd */ +#define PBS_UNIT_SERDES_MUX_MULTI_0_RSRVD_7 (1 << 7) +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_10_MASK 0x00000700 +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_10_SHIFT 8 +/* Rsrvd */ +#define PBS_UNIT_SERDES_MUX_MULTI_0_RSRVD_11 (1 << 11) +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_11_MASK 0x00007000 +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_11_SHIFT 12 +/* Rsrvd */ +#define PBS_UNIT_SERDES_MUX_MULTI_0_RSRVD_15 (1 << 15) +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_12_MASK 0x00030000 +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_12_SHIFT 16 +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_13_MASK 0x000C0000 +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_13_SHIFT 18 +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_14_MASK 0x00300000 +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_14_SHIFT 20 +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_15_MASK 0x00C00000 +#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_15_SHIFT 22 +/* Rsrvd */ +#define PBS_UNIT_SERDES_MUX_MULTI_0_RSRVD_MASK 0xFF000000 +#define PBS_UNIT_SERDES_MUX_MULTI_0_RSRVD_SHIFT 24 + +/**** serdes_mux_multi_1 register ****/ +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_A_0_MASK 0x00000003 +#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_A_0_SHIFT 0 +/* Rsrvd */ +#define PBS_UNIT_SERDES_MUX_MULTI_1_RSRVD_3_2_MASK 0x0000000C +#define PBS_UNIT_SERDES_MUX_MULTI_1_RSRVD_3_2_SHIFT 2 +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_B_0_MASK 0x00000070 +#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_B_0_SHIFT 4 +/* Rsrvd */ +#define PBS_UNIT_SERDES_MUX_MULTI_1_RSRVD_7 (1 << 7) +/* serdes one hot mux control. */ +#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_C_0_MASK 0x00000300 +#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_C_0_SHIFT 8 +/* Rsrvd */ +#define PBS_UNIT_SERDES_MUX_MULTI_1_RSRVD_11_10_MASK 0x00000C00 +#define PBS_UNIT_SERDES_MUX_MULTI_1_RSRVD_11_10_SHIFT 10 +/* serdes one hot mux control. 
+ */
+#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_D_0_MASK 0x00007000
+#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_D_0_SHIFT 12
+/* Rsrvd */
+#define PBS_UNIT_SERDES_MUX_MULTI_1_RSRVD_MASK 0xFFFF8000
+#define PBS_UNIT_SERDES_MUX_MULTI_1_RSRVD_SHIFT 15
+
+/**** pbs_ulpi_mux_conf register ****/
+/* Value 0 - select dedicated pins for the USB-1 inputs */
+#define PBS_UNIT_PBS_ULPI_MUX_CONF_SEL_UPLI_IN_PBSMUX_MASK 0x000007FF
+#define PBS_UNIT_PBS_ULPI_MUX_CONF_SEL_UPLI_IN_PBSMUX_SHIFT 0
+/* [3] - force to zero; [2] == 1 - force register selection; [1 : ... */
+#define PBS_UNIT_PBS_ULPI_MUX_CONF_REG_MDIO_BYPASS_SEL_MASK 0x0000F000
+#define PBS_UNIT_PBS_ULPI_MUX_CONF_REG_MDIO_BYPASS_SEL_SHIFT 12
+/* [0] set the clk_ulpi OE for USB0, 1'b0 set to input 1'b1 set ... */
+#define PBS_UNIT_PBS_ULPI_MUX_CONF_RSRVD_MASK 0xFFFF0000
+#define PBS_UNIT_PBS_ULPI_MUX_CONF_RSRVD_SHIFT 16
+
+/**** wr_once_dbg_dis_ovrd_reg register ****/
+/* This register can be written only once. */
+#define PBS_UNIT_WR_ONCE_DBG_DIS_OVRD_REG_WR_ONCE_DBG_DIS_OVRD (1 << 0)
+
+#define PBS_UNIT_WR_ONCE_DBG_DIS_OVRD_REG_RSRVD_MASK 0xFFFFFFFE
+#define PBS_UNIT_WR_ONCE_DBG_DIS_OVRD_REG_RSRVD_SHIFT 1
+
+/**** gpio5_conf_status register ****/
+/* Cntl: // [7:0] nGPAFEN; // from regfile // [15 ...
+ */
+#define PBS_UNIT_GPIO5_CONF_STATUS_CONF_MASK 0x0000FFFF
+#define PBS_UNIT_GPIO5_CONF_STATUS_CONF_SHIFT 0
+/* status: // [24:16] GPAFIN; // to regfile */
+#define PBS_UNIT_GPIO5_CONF_STATUS_STATUS_MASK 0xFFFF0000
+#define PBS_UNIT_GPIO5_CONF_STATUS_STATUS_SHIFT 16
+
+/**** pbs_sb2nb_cfg_dram_remap register ****/
+#define PBS_UNIT_SB2NB_REMAP_BASE_ADDR_SHIFT 5
+#define PBS_UNIT_SB2NB_REMAP_BASE_ADDR_MASK 0x0000FFE0
+#define PBS_UNIT_SB2NB_REMAP_TRANSL_BASE_ADDR_SHIFT 21
+#define PBS_UNIT_SB2NB_REMAP_TRANSL_BASE_ADDR_MASK 0xFFE00000
+
+/* Bits [39:29] of the 40-bit DRAM physical address are used for remapping */
+#define PBS_UNIT_DRAM_SRC_REMAP_BASE_ADDR_SHIFT 29
+#define PBS_UNIT_DRAM_DST_REMAP_BASE_ADDR_SHIFT 29
+#define PBS_UNIT_DRAM_REMAP_BASE_ADDR_MASK 0xFFE0000000
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_PBS_REG_H */
+
+
+
+
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_pcie.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_pcie.h
new file mode 100644
index 00000000000000..af90e30712070f
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_pcie.h
@@ -0,0 +1,827 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+    ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+    WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+    ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+    (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+    LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+    ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup grouppcie PCI Express Controller
+ * @{
+ * @section overview Overview
+ * This header file provides the API for the HAL driver of the pcie port. The
+ * driver provides the following functionalities:
+ * - Port initialization
+ * - Link operation
+ * - Interrupt transaction generation (Endpoint mode).
+ * - Configuration Access management functions
+ * - Internal Translation Unit programming
+ *
+ * This API does not provide the following:
+ * - PCIe transaction generation and reception (except interrupts as mentioned
+ *   above), as this functionality is done by the port without the need for
+ *   software intervention.
+ * - Configuration Access: those transactions are generated automatically by
+ *   the port (ECAM or ATU mode) when the CPU issues memory transactions
+ *   through the fabric toward the PCIe port. This API provides management
+ *   functions for controlling the Configuration Access type and bus
+ *   destination.
+ * - Interrupt Handling.
+ * - Message Generation: commonly used messages are generated automatically;
+ *   in addition, the generic ATU mechanism can be used for generating various
+ *   kinds of messages.
+ * - PCIe Port Management: both link and port power management features can be
+ *   managed using the PCI/PCIe standard power management and PCIe capabilities
+ *   registers.
+ * - PCIe link and protocol error handling: this feature can be managed using
+ *   the Advanced Error Reporting PCIe capability registers.
+ *
+ * @section flows Software Flows
+ * @subsection init Initialization
+ * - allocate and zero an al_pcie_port structure handle.
+ * - call al_pcie_handle_init() with a pointer to the allocated al_pcie_port
+ *   handle, the address of the port internal registers space, and the port id.
+ * - set the port mode, End-Point or Root-Complex (default).
+ * - set the number of lanes connected to the controller.
+ * - enable the controller using al_pcie_port_enable(). Note that this function
+ *   expects the virtual address of the PBS registers as its first parameter.
+ * - wait for 2000 Southbridge cycles.
+ * - prepare an al_pcie_config_params structure depending on the chip, board
+ *   and system configuration.
+ *   For example, when using the port as root complex, the function_mode field
+ *   should be set to AL_PCIE_FUNCTION_MODE_RC. In this example we prepare the
+ *   following configuration:
+ *   - Root Complex mode
+ *   - Set the Max Link Speed to Gen2
+ *   - Set the max lanes width to 2 (x2)
+ *   - Disable reversal mode
+ *   - Enable Snoops to support I/O Hardware cache coherency
+ *   - Enable pcie core RAM parity
+ *   - Enable pcie core AXI parity
+ *   - Keep transaction layer default credits
+ *   so we prepare the following structures:
+ * @code
+ * - struct al_pcie_link_params link_params = { AL_PCIE_LINK_SPEED_GEN2,
+ *					AL_FALSE }; // disable reversal mode
+ *
+ * - struct al_pcie_config_params config_params = { AL_PCIE_FUNCTION_MODE_RC,
+ *					&link_params,
+ *					AL_TRUE, // enable Snoop for inbound memory transactions
+ *					AL_TRUE, // enable pcie port RAM parity
+ *					AL_TRUE, // enable pcie port AXI parity
+ *					NULL, // use default latency/replay timers
+ *					NULL, // use default gen2 pipe params
+ *					NULL, // gen3_params not needed when max speed set to Gen2
+ *					NULL, // don't change TL credits
+ *					NULL, // end point params not needed
+ *					AL_FALSE, // no fast link
+ *					AL_FALSE // return 0xFFFFFFFF for read transactions with pci target error
+ *	}
+ * @endcode
+ * - now call al_pcie_port_config() with the handle and the config_params
+ *   structure.
+ *
+ * @subsection linkinit Link Initialization
+ * - once the port is configured, we can start the PCIe link:
+ *   - call al_pcie_link_start()
+ *   - call al_pcie_link_up_wait()
+ *   - allocate an al_pcie_link_status struct, call al_pcie_link_status() and
+ *     check that the link is established.
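+ *
+ *   A minimal sketch of this link bring-up sequence (the timeout value and
+ *   the error handling are illustrative only; the exact function signatures
+ *   are those declared by this HAL):
+ * @code
+ * struct al_pcie_link_status status;
+ *
+ * al_pcie_link_start(&pcie_port);
+ * al_pcie_link_up_wait(&pcie_port, timeout);
+ * al_pcie_link_status(&pcie_port, &status);
+ * if (!status.link_up)
+ *	return -EIO; // link training did not complete
+ * @endcode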
+ */ + +#ifndef _AL_HAL_PCIE_H_ +#define _AL_HAL_PCIE_H_ + +#include + +/****************************** Constants ***********************************/ +/** Inbound header credits sum */ +#define AL_PCIE_IB_HCRD_SUM 97 + +/** + * Minimal ratio between outstanding header completions and the number of + * outstanding outbound reads + * (max request size / cache line) + 1 = 256/64+1 + */ +#define AL_PCIE_NOF_CPL_HDR_NOF_OS_OB_READS_MIN_RATIO 5 + +/** Maximal value for outstanding headers reads and header writes */ +#define AL_PCIE_NOF_P_NP_HDR_MAX 24 + +/*********************** Data Structures and Types **************************/ + +/** + * al_pcie_port: data structure used by the HAL to handle a specific pcie port. + * this structure is allocated and set to zeros by the upper layer, then it is + * initialized by the al_pcie_handle_init() that should be called before any + * other function of this API. later, this handle passed to the API functions. + */ +struct al_pcie_port { + struct al_pcie_regs __iomem *regs; + + /* Revision ID - valid after calling 'al_pcie_port_config' */ + uint8_t rev_id; + + unsigned int port_id; + al_bool write_to_read_only_enabled; + uint8_t max_lanes; + + /** + * Determine whether configuring 'nof_np_hdr' and 'nof_p_hdr' is + * required in the core + */ + al_bool ib_hcrd_config_required; + + /* Internally used - see 'al_pcie_ib_hcrd_os_ob_reads_config' */ + unsigned int nof_np_hdr; + + /* Internally used - see 'al_pcie_ib_hcrd_os_ob_reads_config' */ + unsigned int nof_p_hdr; + + /* Internally used - see al_hal_pcie_interrupts.c */ + uint32_t __iomem *app_int_grp_a_base; + uint32_t __iomem *app_int_grp_b_base; + uint32_t __iomem *axi_int_grp_a_base; +}; + + +/** Function mode (endpoint, root complex) */ +enum al_pcie_function_mode { + AL_PCIE_FUNCTION_MODE_EP, + AL_PCIE_FUNCTION_MODE_RC, + AL_PCIE_FUNCTION_MODE_UNKNOWN +}; + +/* The maximum link speed, measured GT/s (Giga transfer / second) + * DEFAULT: do not change the current speed + * GEN1: 2.5 GT/s + * GEN2: 5 GT/s + * GEN3: 8GT/s + */ +enum al_pcie_link_speed { + AL_PCIE_LINK_SPEED_DEFAULT, + AL_PCIE_LINK_SPEED_GEN1 = 1, + AL_PCIE_LINK_SPEED_GEN2 = 2, + AL_PCIE_LINK_SPEED_GEN3 = 3 +}; + +/** PCIe capabilities that supported by a specific port */ +struct al_pcie_max_capability { + al_bool end_point_mode_supported; + al_bool root_complex_mode_supported; + enum al_pcie_link_speed max_speed; + uint8_t max_lanes; + al_bool reversal_supported; + uint8_t atu_regions_num; + uint32_t atu_min_size; +}; + + +/** PCIe link related parameters */ +struct al_pcie_link_params { + enum al_pcie_link_speed max_speed; + al_bool enable_reversal; +}; + +/** PCIe gen2 link parameters */ +struct al_pcie_gen2_params { + al_bool tx_swing_low; /* set tx swing low when true, and tx swing full when false */ + al_bool tx_compliance_receive_enable; + al_bool set_deemphasis; +}; + +/** PCIe gen 3 standard per lane equalization parameters */ +struct al_pcie_gen3_lane_eq_params { + uint8_t downstream_port_transmitter_preset; + uint8_t downstream_port_receiver_preset_hint; + uint8_t upstream_port_transmitter_preset; + uint8_t upstream_port_receiver_preset_hint; +}; + +/** PCIe gen 3 equalization parameters */ +struct al_pcie_gen3_params { + al_bool perform_eq; + al_bool interrupt_enable_on_link_eq_request; + struct al_pcie_gen3_lane_eq_params *eq_params; /* array of lanes params */ + int eq_params_elements; /* number of elements in the eq_params array */ + + al_bool eq_disable; /* disables the equalization feature */ + al_bool 
+	uint8_t local_lf; /* Low Frequency (LF) Value for Gen3 Transmit Equalization */
+
+	uint8_t local_fs; /* Full Swing (FS) Value for Gen3 Transmit Equalization */
+			  /* Value Range: 12 through 63 (decimal). */
+};
+
+/** Transport Layer credits parameters */
+struct al_pcie_tl_credits_params {
+};
+
+/** BAR register configuration parameters (Endpoint Mode only) */
+struct al_pcie_ep_bar_params {
+	al_bool enable;
+	al_bool memory_space; /**< memory or io */
+	al_bool memory_64_bit; /**< is the memory space 64 bit */
+	al_bool memory_is_prefetchable;
+	uint64_t size; /* the bar size in bytes */
+};
+
+/** BARs configuration parameters (Endpoint Mode only) */
+struct al_pcie_ep_params {
+	al_bool cap_d1_d3hot_dis;
+	al_bool cap_flr_dis;
+	al_bool cap_aspm_dis;
+	al_bool relaxed_pcie_ordering;
+	al_bool bar_params_valid;
+	struct al_pcie_ep_bar_params bar_params[6];
+	struct al_pcie_ep_bar_params exp_bar_params; /* expansion ROM BAR */
+};
+
+/** Various configuration features */
+struct al_pcie_features {
+	/**
+	 * Enable MSI fix from the SATA to the PCIe EP
+	 * Only valid for port 0, when enabled as EP
+	 */
+	al_bool sata_ep_msi_fix;
+};
+
+/**
+ * Inbound posted/non-posted header credits and outstanding outbound reads
+ * completion header configuration
+ *
+ * Constraints:
+ * - nof_cpl_hdr + nof_np_hdr + nof_p_hdr == AL_PCIE_IB_HCRD_SUM
+ * - (nof_outstanding_ob_reads x AL_PCIE_NOF_CPL_HDR_NOF_OS_OB_READS_MIN_RATIO)
+ *   <= nof_cpl_hdr
+ * - nof_p_hdr <= AL_PCIE_NOF_P_NP_HDR_MAX
+ * - nof_np_hdr <= AL_PCIE_NOF_P_NP_HDR_MAX
+ * - nof_cpl_hdr > 0
+ * - nof_p_hdr > 0
+ * - nof_np_hdr > 0
+ */
+struct al_pcie_ib_hcrd_os_ob_reads_config {
+	/** Max number of outstanding outbound reads */
+	uint8_t nof_outstanding_ob_reads;
+
+	/**
+	 * This value sets the number of outstanding completion (CMPL) headers
+	 * the core can receive (the core always advertises infinite credits
+	 * for CMPLs).
+	 */
+	unsigned int nof_cpl_hdr;
+
+	/**
+	 * This value sets the number of outstanding header reads (non-posted
+	 * transactions) the core can receive (it sets the value in the init
+	 * FC process).
+	 */
+	unsigned int nof_np_hdr;
+
+	/**
+	 * This value sets the number of outstanding header writes (posted
+	 * transactions) the core can receive (it sets the value in the init
+	 * FC process).
+	 */
+	unsigned int nof_p_hdr;
+};
+
+/** I/O Virtualization support in EP configuration */
+struct al_pcie_ep_iov_params {
+	/**
+	 * Enable multiple Virtual Functions support by propagating VMID to
+	 * outbound requests
+	 */
+	al_bool sriov_vfunc_en;
+
+	/**
+	 * Fix client1 FMT bits after cutting address 63:56, fix address format
+	 * to 32-bits if the original request is a 32-bit address.
+	 */
+	al_bool support_32b_address_in_iov;
+};
+
+/** PCIe Ack/Nak Latency and Replay timers */
+struct al_pcie_latency_replay_timers {
+	uint16_t round_trip_lat_limit;
+	uint16_t replay_timer_limit;
+};
+
+/** PCIe port configuration parameters
+ * This structure includes the parameters that the HAL should apply to the port
+ * (by al_pcie_port_config()).
+ * The fields that are pointers (e.g. link_params) can be set to NULL; in that
+ * case, al_pcie_port_config() will keep the current HW settings.
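+ *
+ * For example, a minimal RC setup (a sketch; every pointer field left NULL
+ * keeps the current HW settings):
+ * @code
+ * struct al_pcie_link_params link_params = {
+ *	.max_speed = AL_PCIE_LINK_SPEED_GEN2,
+ *	.enable_reversal = AL_FALSE,
+ * };
+ * struct al_pcie_config_params config_params = {
+ *	.function_mode = AL_PCIE_FUNCTION_MODE_RC,
+ *	.link_params = &link_params,
+ *	.enable_axi_snoop = AL_TRUE,
+ *	.enable_ram_parity_int = AL_TRUE,
+ *	.enable_axi_parity_int = AL_TRUE,
+ *	// all other pointer fields default to NULL
+ * };
+ * @endcode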
+ */ +struct al_pcie_config_params { + enum al_pcie_function_mode function_mode; /**< indicates at which mode the controller operates */ + struct al_pcie_link_params *link_params; + al_bool enable_axi_snoop; + al_bool enable_ram_parity_int; + al_bool enable_axi_parity_int; + struct al_pcie_latency_replay_timers *lat_rply_timers; + struct al_pcie_gen2_params *gen2_params; + struct al_pcie_gen3_params *gen3_params; + struct al_pcie_tl_credits_params *tl_credits; + struct al_pcie_ep_params *ep_params; + struct al_pcie_features *features; + struct al_pcie_ep_iov_params *ep_iov_params; + al_bool fast_link_mode; /* Sets all internal timers to Fast Mode for speeding up simulation.*/ + al_bool enable_axi_slave_err_resp; /**< when true, the PCI unit will return Slave Error/Decoding Error to the master unit in case of error. when false, the value 0xFFFFFFFF will be returned without error indication. */ +}; + +/** PCIe link status */ +struct al_pcie_link_status { + al_bool link_up; + enum al_pcie_link_speed speed; + uint8_t lanes; + uint8_t ltssm_state; +}; + +/** PCIe MSIX capability configuration parameters */ +struct al_pcie_msix_params { + uint16_t table_size; + uint16_t table_offset; + uint8_t table_bar; + uint16_t pba_offset; + uint16_t pba_bar; +}; + +/*********************** PCIe Port Initialization API **************/ +/** Enable PCIe unit (deassert reset) + * + * @param pcie_port pcie port handle + * @param pbs_reg_base the virtual base address of the pbs registers + * + * @return 0 if no error found. + */ +int al_pcie_port_enable( + struct al_pcie_port *pcie_port, + void __iomem *pbs_reg_base); + +/** Disable PCIe unit (assert reset) + * + * @param pcie_port pcie port handle + * @param pbs_reg_base the virtual base address of the pbs registers + */ +void al_pcie_port_disable( + struct al_pcie_port *pcie_port, + void __iomem *pbs_reg_base); + +/** + * Initializes a PCIe handle structure. + * + * @param pcie_port an allocated, non-initialized instance. + * @param pcie_reg_base the virtual base address of the port internal registers + * @param port_id the port id (used mainly for debug messages) + * + * @return 0 if no error found. + */ +int al_pcie_handle_init(struct al_pcie_port *pcie_port, + void __iomem *pcie_reg_base, + unsigned int port_id); + +/** + * Configure number of lanes connected to this port. + * This function can be called only before enabling the controller using al_pcie_port_enable(). + * + * @param pcie_port pcie port handle + * @param lanes number of lanes + * + * @return 0 if no error found. + */ +int al_pcie_port_max_lanes_set(struct al_pcie_port *pcie_port, uint8_t lanes); + +/** + * Port memory shutdown/up + * This function can be called only when the controller is disabled + * + * @param pcie_port pcie port handle + * @param enable memory shutdown enable or disable + * + */ +void al_pcie_port_memory_shutdown_set( + struct al_pcie_port *pcie_port, + al_bool enable); + +/** + * @brief set current function mode (root complex or endpoint) + * This function can be called only before enabling the controller using al_pcie_port_enable(). + * + * @param pcie_port pcie port handle + * @param mode pcie port mode + * + * @return 0 if no error found. 
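+ *
+ * Example (a sketch; must run while the controller is still disabled):
+ * @code
+ * al_pcie_port_func_mode_config(&port, AL_PCIE_FUNCTION_MODE_EP);
+ * al_pcie_port_enable(&port, pbs_reg_base);
+ * @endcode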
+ */ +int al_pcie_port_func_mode_config(struct al_pcie_port *pcie_port, + enum al_pcie_function_mode mode); + +/** + * @brief Inbound posted/non-posted header credits and outstanding outbound + * reads completion header configuration + * + * @param pcie_port pcie port handle + * @param ib_hcrd_os_ob_reads_config + * Inbound header credits and outstanding outbound reads + * configuration + */ +void al_pcie_port_ib_hcrd_os_ob_reads_config( + struct al_pcie_port *pcie_port, + struct al_pcie_ib_hcrd_os_ob_reads_config *ib_hcrd_os_ob_reads_config); + +/** + * @brief return current function mode (root complex or endpoint) + * + * @param pcie_port pcie port handle + * + * @return pcie port current mode. + */ +enum al_pcie_function_mode +al_pcie_function_type_get(struct al_pcie_port *pcie_port); + + +/** + * @brief configure pcie port (mode, link params, etc..) + * this function must be called before initializing the link + * + * @param pcie_port pcie port handle + * @param params configuration structure. + * + * @return 0 if no error found + */ +int al_pcie_port_config(struct al_pcie_port *pcie_port, + struct al_pcie_config_params *params); + +/** + * @brief Enable/disable deferring incoming configuration requests until + * initialization is complete. When enabled, the core completes incoming + * configuration requests with a Configuration Request Retry Status. + * Other incoming Requests complete with Unsupported Request status. + * + * @param pcie_port pcie port handle + * @param en enable/disable + */ +void al_pcie_app_req_retry_set( + struct al_pcie_port *pcie_port, + al_bool en); + +/** + * @brief configure pcie port axi snoop + * + * @param pcie_port pcie port handle + * @param enable_axi_snoop enable snoop. + * + * @return 0 if no error found + */ +int al_pcie_port_snoop_config(struct al_pcie_port *pcie_port, + al_bool enable_axi_snoop); + +/********************** PCIE Link Operations API ********************/ +/** + * @brief start pcie link + * + * @param pcie_port pcie port handle + * + * @return 0 if no error found + */ +int al_pcie_link_start(struct al_pcie_port *pcie_port); + +/** + * @brief stop pcie link + * + * @param pcie_port pcie port handle + * + * @return 0 if no error found + */ +int al_pcie_link_stop(struct al_pcie_port *pcie_port); + +/** + * @brief wait for link up indication + * this function waits for link up indication, it polls LTSSM state until link is ready + * + * @param pcie_port pcie port handle + * @param timeout_ms maximum timeout in milli-seconds to wait for link up + * + * @return 0 if link up indication detected + * -ETIME if not. 
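+ *
+ * Example (a sketch; the 100 ms timeout is an arbitrary choice):
+ * @code
+ * al_pcie_link_start(&port);
+ * if (al_pcie_link_up_wait(&port, 100))
+ *	al_err("pcie %u: link is down\n", port.port_id);
+ * @endcode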
+ */ +int al_pcie_link_up_wait(struct al_pcie_port *pcie_port, uint32_t timeout_ms); + +/** + * @brief get link status + * + * @param pcie_port pcie port handle + * @param status structure for link status + * + * @return 0 if no error found + */ +int al_pcie_link_status(struct al_pcie_port *pcie_port, struct al_pcie_link_status *status); + +/** + * @brief trigger hot reset + * this function triggers hot-reset, it doesn't wait for link re-establishment + * + * @param pcie_port pcie port handle + * + * @return 0 if no error found + */ +int al_pcie_link_hot_reset(struct al_pcie_port *pcie_port); + +/* TODO: check if this function needed */ +int al_pcie_link_change_speed(struct al_pcie_port *pcie_port, enum al_pcie_link_speed new_speed); + +/* TODO: check if this function needed */ +int al_pcie_link_change_width(struct al_pcie_port *pcie_port, uint8_t width); + + +/* Configuration Space Access Through PCI-E_ECAM_Ext PASW (RC mode only) */ + +/** + * @brief set target_bus and mask_target_bus + * @param pcie_port pcie port handle + * @param target_bus + * @param mask_target_bus + * @return 0 if no error found + */ +int al_pcie_target_bus_set(struct al_pcie_port *pcie_port, + uint8_t target_bus, + uint8_t mask_target_bus); + +/** + * @brief get target_bus and mask_target_bus + * @param pcie_port pcie port handle + * @param target_bus + * @param mask_target_bus + * @return 0 if no error found + */ +int al_pcie_target_bus_get(struct al_pcie_port *pcie_port, + uint8_t *target_bus, + uint8_t *mask_target_bus); + +/** + * Set secondary bus number + * + * @param pcie_port pcie port handle + * @param secbus pci secondary bus number + * + * @return 0 if no error found. + */ +int al_pcie_secondary_bus_set(struct al_pcie_port *pcie_port, uint8_t secbus); + +/** + * Set subordinary bus number + * + * @param pcie_port pcie port handle + * @param subbus the highest bus number of all of the buses that can be reached + * downstream of the PCIE instance. + * + * @return 0 if no error found. 
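+ *
+ * Example (a sketch, following the enumeration sequence described at the
+ * top of this file):
+ * @code
+ * al_pcie_secondary_bus_set(&port, 5);
+ * al_pcie_subordinary_bus_set(&port, 255);
+ * al_pcie_target_bus_set(&port, 5, 0xff); // next ECAM Cfg accesses hit bus 5
+ * @endcode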
+ */ +int al_pcie_subordinary_bus_set(struct al_pcie_port *pcie_port,uint8_t subbus); + + +/** + * @brief get base address of pci configuration space header + * @param pcie_port pcie port handle + * @param addr pointer for returned address; + * @return 0 if no error found + */ +int al_pcie_config_space_get(struct al_pcie_port *pcie_port, + uint8_t __iomem **addr); + +/** + * Read data from the local configuration space + * + * @param pcie_port + * PCIe port handle + * @param reg_offset + * Configuration space register offset + * + * @return Read data + */ +uint32_t al_pcie_cfg_emul_local_cfg_space_read( + struct al_pcie_port *pcie_port, + unsigned int reg_offset); + +/** + * Write data to the local configuration space + * + * @param pcie_port + * PCIe port handle + * @param reg_offset + * Configuration space register offset + * @param data + * Data to write + * @param ro + * Is a read-only register according to PCIe specification + * + */ +void al_pcie_cfg_emul_local_cfg_space_write( + struct al_pcie_port *pcie_port, + unsigned int reg_offset, + uint32_t data, + al_bool ro); + +/******************* Internal Address Translation Unit (ATU) *************/ +enum al_pcie_atu_dir { + al_pcie_atu_dir_outbound = 0, + al_pcie_atu_dir_inbound = 1, +}; + +enum al_pcie_atu_tlp { + AL_PCIE_TLP_TYPE_MEM = 0, + AL_PCIE_TLP_TYPE_IO = 2, + AL_PCIE_TLP_TYPE_CFG0 = 4, + AL_PCIE_TLP_TYPE_CFG1 = 5, + AL_PCIE_TLP_TYPE_MSG = 0x10, + AL_PCIE_TLP_TYPE_RESERVED = 0x1f +}; + +struct al_pcie_atu_region { + al_bool enable; + enum al_pcie_atu_dir direction; /* outbound or inbound */ + uint8_t index; /* region index */ + uint64_t base_addr; + uint64_t limit; /* only bits [39:0] are valid given the Alpine PoC maximum physical address space */ + uint64_t target_addr; /* the address that matches will be translated to this address + offset */ + al_bool invert_matching; + enum al_pcie_atu_tlp tlp_type; /* pcie tlp type*/ + uint8_t attr; /* pcie frame header attr field*/ + /* outbound specific params */ + uint8_t msg_code; /* pcie message code */ + al_bool cfg_shift_mode; + /* inbound specific params*/ + uint8_t bar_number; + uint8_t match_mode; /* BAR match mode, used in EP for MEM and IO tlps*/ + al_bool enable_attr_match_mode; + al_bool enable_msg_match_mode; +}; + +/** + * @brief program internal ATU region entry + * @param pcie_port pcie port handle + * @param atu_region data structure that contains the region index and the translation parameters + * @return + */ +int al_pcie_atu_region_set(struct al_pcie_port *pcie_port, struct al_pcie_atu_region *atu_region); + +/** + * @brief Configure axi io bar. every hit to this bar will override size to 4 bytes. + * @param pcie_port pcie port handle + * @param start the first address of the memory + * @param end the last address of the memory + * @return + */ +void al_pcie_axi_io_config(struct al_pcie_port *pcie_port, al_phys_addr_t start, al_phys_addr_t end); + +/********************** Interrupt generation (Endpoint mode Only) ************/ + +enum al_pcie_legacy_int_type{ + AL_PCIE_LEGACY_INTA = 0, + AL_PCIE_LEGACY_INTB, + AL_PCIE_LEGACY_INTC, + AL_PCIE_LEGACY_INTD +}; + +/** + * @brief generate INTx Assert/DeAssert Message + * @param pcie_port pcie port handle + * @param assert when true, Assert Message is sent. 
+ * @param type of message (INTA, INTB, etc) + * @return 0 if no error found + */ +int al_pcie_legacy_int_gen(struct al_pcie_port *pcie_port, al_bool assert, + enum al_pcie_legacy_int_type type /*A,B,..*/); + +/** + * @brief generate MSI interrupt + * @param pcie_port pcie port handle + * @param vector the vector index to send interrupt for. + * @return 0 if no error found + */ +int al_pcie_msi_int_gen(struct al_pcie_port *pcie_port, uint8_t vector); + +/** + * @brief configure MSIX capability + * @param pcie_port pcie port handle + * @param msix_params MSIX capability configuration parameters + * @return 0 if no error found + */ +int al_pcie_msix_config( + struct al_pcie_port *pcie_port, + struct al_pcie_msix_params *msix_params); + +/** + * @brief check whether MSIX capability is enabled + * @param pcie_port pcie port handle + * @return AL_TRUE if MSIX capability is enabled, AL_FALSE otherwise + */ +al_bool al_pcie_msix_enabled(struct al_pcie_port *pcie_port); + +/** + * @brief check whether MSIX capability is masked + * @param pcie_port pcie port handle + * @return AL_TRUE if MSIX capability is masked, AL_FALSE otherwise + */ +al_bool al_pcie_msix_masked(struct al_pcie_port *pcie_port); + +/********************** Loopback mode (RC and Endpoint modes) ************/ + +/** + * @brief enter local pipe loopback mode + * This mode will connect the pipe RX signals to TX. + * no need to start link when using this mode. + * Gen3 equalization must be disabled before enabling this mode + * The caller must make sure the port is ready to accept the TLPs it sends to + * itself. for example, BARs should be initialized before sending memory TLPs. + * + * @param pcie_port pcie port handle + * @return 0 if no error found + */ +int al_pcie_local_pipe_loopback_enter(struct al_pcie_port *pcie_port); + +/** + * @brief exit local pipe loopback mode + * + * @param pcie_port pcie port handle + * @return 0 if no error found + */ +int al_pcie_local_pipe_loopback_exit(struct al_pcie_port *pcie_port); + +/** + * @brief enter master remote loopback mode + * No need to configure the link partner to enter slave remote loopback mode + * as this should be done as response to special training sequence directives + * when master works in remote loopback mode. + * The caller must make sure the port is ready to accept the TLPs it sends to + * itself. for example, BARs should be initialized before sending memory TLPs. + * + * @param pcie_port pcie port handle + * @return 0 if no error found + */ +int al_pcie_remote_loopback_enter(struct al_pcie_port *pcie_port); + +/** + * @brief exit remote loopback mode + * + * @param pcie_port pcie port handle + * @return 0 if no error found + */ +int al_pcie_remote_loopback_exit(struct al_pcie_port *pcie_port); + +#endif +/** @} end of grouppcie group */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_plat_services.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_plat_services.h new file mode 100644 index 00000000000000..1a9815aef274b9 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_plat_services.h @@ -0,0 +1,203 @@ +/******************************************************************************* +Copyright (C) 2015 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. 
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_services Platform Services API
+ * Platform Services API
+ * @{
+ * @file   al_hal_plat_services.h
+ *
+ * @brief  API for Platform services provided to HAL drivers
+ *
+ *
+ */
+
+#ifndef __PLAT_SERVICES_H__
+#define __PLAT_SERVICES_H__
+
+#include
+#include
+#include
+#include
+
+#include
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+#define al_reg_read8(l)		readb(l)
+#define al_reg_read16(l)	readw(l)
+#define al_reg_read32(l)	readl(l)
+#define al_reg_read64(l)
+
+#define al_reg_write8(l,v)	writeb(v,l)
+#define al_reg_write16(l,v)	writew(v,l)
+#define al_reg_write32(l,v)	writel(v,l)
+
+#ifdef CONFIG_ARM
+/*
+ * Relaxed register read/write functions don't involve cpu instructions that
+ * force synchronization, nor ordering between the register access and memory
+ * data access.
+ * These instructions are used in performance critical code to avoid the
+ * overhead of the synchronization instructions.
+ */
+#define al_reg_read32_relaxed(l)	readl_relaxed(l)
+#define al_reg_write32_relaxed(l,v)	writel_relaxed(v,l)
+#else
+#define al_reg_read32_relaxed(l)	readl(l)
+#define al_reg_write32_relaxed(l,v)	writel(v,l)
+#endif
+
+/**
+ * print message
+ *
+ * @param type type of message
+ * @param fmt format string
+ */
+#define al_print(type, fmt, ...)	printk(KERN_INFO fmt, ##__VA_ARGS__)
+
+/**
+ * print error message
+ *
+ * @param format
+ */
+#define al_err(...)		pr_err(__VA_ARGS__)
+
+/**
+ * print warning message
+ *
+ * @param format
+ */
+#define al_warn(...)		pr_info(__VA_ARGS__)
+
+/**
+ * print info message
+ *
+ * @param format
+ */
+#define al_info(...)		pr_info(__VA_ARGS__)
+
+/**
+ * print debug message
+ *
+ * @param format
+ */
+#define al_dbg(...)		pr_debug(__VA_ARGS__)
+
+/**
+ * Assertion
+ *
+ * @param COND condition to assert
+ */
+#define al_assert(COND)		BUG_ON(!(COND))
+
+/**
+ * Make sure data will be visible to DMA masters. Usually this is achieved by
+ * the ARM DMB instruction.
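+ *
+ * Typical usage (a sketch; 'desc', 'buf_phys' and 'doorbell_reg' are
+ * hypothetical):
+ * @code
+ * desc->addr = swap64_to_le(buf_phys);	// publish the DMA descriptor
+ * al_data_memory_barrier();		// make it visible to the device
+ * al_reg_write32(doorbell_reg, 1);	// only then ring the doorbell
+ * @endcode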
+ */
+
+static inline void al_data_memory_barrier(void)
+{
+	mb();
+}
+
+/**
+ * Make sure data will be visible, in order, to the other CPUs and masters.
+ */
+static inline void al_smp_data_memory_barrier(void)
+{
+	smp_mb();
+}
+
+static inline void al_local_data_memory_barrier(void)
+{
+	mb();
+}
+
+/**
+ * al_udelay - microsecond delay
+ * @param u microseconds to delay
+ */
+#define al_udelay(u)	udelay(u)
+
+#define al_msleep(m)	msleep(m)
+
+#define swap16_to_le(x)		cpu_to_le16(x)
+#define swap32_to_le(x)		cpu_to_le32(x)
+#define swap64_to_le(x)		cpu_to_le64(x)
+#define swap16_from_le(x)	le16_to_cpu(x)
+#define swap32_from_le(x)	le32_to_cpu(x)
+#define swap64_from_le(x)	le64_to_cpu(x)
+
+/**
+ * Memory set
+ *
+ * @param p memory pointer
+ * @param val value for setting
+ * @param cnt number of bytes to set
+ */
+#define al_memset(p, val, cnt)		memset(p, val, cnt)
+
+/**
+ * memory compare
+ *
+ * @param p1 memory pointer
+ * @param p2 memory pointer
+ * @param cnt number of bytes to compare
+ *
+ * @return 0 if equal, non-zero otherwise
+ */
+#define al_memcmp(p1, p2, cnt)		memcmp(p1, p2, cnt)
+
+/**
+ * memory copy
+ *
+ * @param dest memory pointer to destination
+ * @param src memory pointer to source
+ * @param cnt number of bytes to copy
+ */
+#define al_memcpy(dest, src, cnt)	memcpy(dest, src, cnt)
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of Platform Services API group */
+#endif	/* __PLAT_SERVICES_H__ */
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_plat_types.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_plat_types.h
new file mode 100644
index 00000000000000..1bb75441852647
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_plat_types.h
@@ -0,0 +1,76 @@
+/*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_services Platform Services API
+ * Platform Services API
+ * @{
+ * @file   al_hal_plat_types.h
+ *
+ * @brief  platform dependent data types
+ *
+ *
+ */
+
+#ifndef __PLAT_TYPES_H__
+#define __PLAT_TYPES_H__
+
+#include
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/* Basic data types */
+typedef int al_bool;	/*! boolean */
+#define AL_TRUE		1
+#define AL_FALSE	0
+
+/*! in LPAE mode, the address is 40 bits wide; we extend it to 64 bits */
+typedef dma_addr_t al_phys_addr_t;
+
+/*! this defines the cpu endianness. */
+#define PLAT_ARCH_IS_LITTLE()	AL_TRUE
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of Platform Services API group */
+
+#endif	/* __PLAT_TYPES_H__ */
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_reg_utils.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_reg_utils.h
new file mode 100644
index 00000000000000..d3f4b577bb588b
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_reg_utils.h
@@ -0,0 +1,181 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +*******************************************************************************/ + +/** + * @defgroup group_common HAL Common Layer + * @{ + * @file al_hal_reg_utils.h + * + * @brief Register utilities used by HALs and platform layer + * + * + */ + +#ifndef __AL_HAL_REG_UTILS_H__ +#define __AL_HAL_REG_UTILS_H__ + +#include +#include + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +#define AL_BIT(b) (1UL << (b)) + +#define AL_ADDR_LOW(x) ((uint32_t)((al_phys_addr_t)(x))) +#define AL_ADDR_HIGH(x) ((uint32_t)((((al_phys_addr_t)(x)) >> 16) >> 16)) + +/** get field out of 32 bit register */ +#define AL_REG_FIELD_GET(reg, mask, shift) (((reg) & (mask)) >> (shift)) + +/** set field of 32 bit register */ +#define AL_REG_FIELD_SET(reg, mask, shift, val) \ + (reg) = \ + (((reg) & (~(mask))) | \ + ((((unsigned)(val)) << (shift)) & (mask))) + +/** get single bit out of 32 bit register */ +#define AL_REG_BIT_GET(reg, shift) \ + AL_REG_FIELD_GET(reg, AL_BIT(shift), shift) + +#define AL_REG_BITS_FIELD(shift, val) \ + (((unsigned)(val)) << (shift)) + +/** set single bit field of 32 bit register to a given value */ +#define AL_REG_BIT_VAL_SET(reg, shift, val) \ + AL_REG_FIELD_SET(reg, AL_BIT(shift), shift, val) + +/** set single bit of 32 bit register to 1 */ +#define AL_REG_BIT_SET(reg, shift) \ + AL_REG_BIT_VAL_SET(reg, shift, 1) + +/** clear single bit of 32 bit register */ +#define AL_REG_BIT_CLEAR(reg, shift) \ + AL_REG_BIT_VAL_SET(reg, shift, 0) + + +#define AL_BIT_MASK(n) \ + (AL_BIT(n) - 1) + +#define AL_FIELD_MASK(msb, lsb) \ + (AL_BIT(msb) + AL_BIT_MASK(msb) - AL_BIT_MASK(lsb)) + +/** clear bits specified by clear_mask */ +#define AL_REG_MASK_CLEAR(reg, clear_mask) \ + ((reg) = (((reg) & (~(clear_mask))))) + +/** set bits specified by clear_mask */ +#define AL_REG_MASK_SET(reg, clear_mask) \ + ((reg) = (((reg) | (clear_mask)))) + + +/** clear bits specified by clear_mask, and set bits specified by set_mask */ +#define AL_REG_CLEAR_AND_SET(reg, clear_mask, set_mask) \ + (reg) = (((reg) & (~(clear_mask))) | (set_mask)) + +#define AL_ALIGN_UP(val, size) \ + ((size) * (((val) + (size) - 1) / (size))) + +/** take bits selected by mask from one data, the rest from background */ +#define AL_MASK_VAL(mask, data, background) \ + (((mask) & (data)) | ((~mask) & (background))) + +/** + * 8 bits register masked write + * + * @param reg + * register address + * @param mask + * bits not selected (1) by mask will be left unchanged + * @param data + * data to write. bits not selected by mask ignored. + */ +static inline void al_reg_write8_masked(uint8_t __iomem *reg, uint8_t mask + , uint8_t data) +{ + uint8_t temp; + temp = al_reg_read8(reg); + al_reg_write8(reg, AL_MASK_VAL(mask, data, temp)); +} + + +/** + * 16 bits register masked write + * + * @param reg + * register address + * @param mask + * bits not selected (1) by mask will be left unchanged + * @param data + * data to write. bits not selected by mask ignored. + */ +static inline void al_reg_write16_masked(uint16_t __iomem *reg, uint16_t mask + , uint16_t data) +{ + uint16_t temp; + temp = al_reg_read16(reg); + al_reg_write16(reg, AL_MASK_VAL(mask, data, temp)); +} + + +/** + * 32 bits register masked write + * + * @param reg + * register address + * @param mask + * bits not selected (1) by mask will be left unchanged + * @param data + * data to write. bits not selected by mask ignored. 
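+ *
+ * Example (a sketch; writes value 0x5 into a hypothetical field occupying
+ * bits [6:3], leaving all other bits of the register unchanged):
+ * @code
+ * al_reg_write32_masked(reg, AL_FIELD_MASK(6, 3), AL_REG_BITS_FIELD(3, 0x5));
+ * @endcode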
+ */ +static inline void al_reg_write32_masked(uint32_t __iomem *reg, uint32_t mask + , uint32_t data) +{ + uint32_t temp; + temp = al_reg_read32(reg); + al_reg_write32(reg, AL_MASK_VAL(mask, data, temp)); +} + +/* *INDENT-OFF* */ +#ifdef __cplusplus +} +#endif +/* *INDENT-ON* */ +/** @} end of Common group */ +#endif + diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_serdes.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_serdes.h new file mode 100644 index 00000000000000..4be160775780e0 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_serdes.h @@ -0,0 +1,948 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*******************************************************************************/ + +/** + * @defgroup group_serdes_api API + * SerDes HAL driver API + * @ingroup group_serdes SerDes + * @{ + * + * @file al_hal_serdes.h + * + * @brief Header file for the SerDes HAL driver + * + */ + +#ifndef __AL_HAL_SERDES_H__ +#define __AL_HAL_SERDES_H__ + +#include + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +struct al_serdes_obj; + +enum al_serdes_group { + AL_SRDS_GRP_A = 0, + AL_SRDS_GRP_B, + AL_SRDS_GRP_C, + AL_SRDS_GRP_D, + + AL_SRDS_NUM_GROUPS +}; + +struct al_serdes_group_info { + /* + * Group parent object - filled automatically by al_serdes_handle_init + */ + struct al_serdes_obj *pobj; + + /* + * Group specific register base - filled automatically by + * al_sedres_handle_init + */ + struct al_serdes_regs __iomem *regs_base; +}; + +struct al_serdes_obj { + struct al_serdes_group_info grp_info[AL_SRDS_NUM_GROUPS]; +}; + +enum al_serdes_reg_page { + AL_SRDS_REG_PAGE_0_LANE_0 = 0, + AL_SRDS_REG_PAGE_1_LANE_1, + AL_SRDS_REG_PAGE_2_LANE_2, + AL_SRDS_REG_PAGE_3_LANE_3, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_PAGE_0123_LANES_0123 = 7, +}; + +enum al_serdes_reg_type { + AL_SRDS_REG_TYPE_PMA = 0, + AL_SRDS_REG_TYPE_PCS, +}; + +enum al_serdes_lane { + AL_SRDS_LANE_0 = AL_SRDS_REG_PAGE_0_LANE_0, + AL_SRDS_LANE_1 = AL_SRDS_REG_PAGE_1_LANE_1, + AL_SRDS_LANE_2 = AL_SRDS_REG_PAGE_2_LANE_2, + AL_SRDS_LANE_3 = AL_SRDS_REG_PAGE_3_LANE_3, + + AL_SRDS_NUM_LANES, + AL_SRDS_LANES_0123 = AL_SRDS_REG_PAGE_0123_LANES_0123, +}; + +/** Serdes loopback mode */ +enum al_serdes_lb_mode { + /** No loopback */ + AL_SRDS_LB_MODE_OFF, + + /** + * Transmits the untimed, partial equalized RX signal out the transmit + * IO pins. + * No clock used (untimed) + */ + AL_SRDS_LB_MODE_PMA_IO_UN_TIMED_RX_TO_TX, + + /** + * Loops back the TX serializer output into the CDR. + * CDR recovered bit clock used (without attenuation) + */ + AL_SRDS_LB_MODE_PMA_INTERNALLY_BUFFERED_SERIAL_TX_TO_RX, + + /** + * Loops back the TX driver IO signal to the RX IO pins + * CDR recovered bit clock used (only through IO) + */ + AL_SRDS_LB_MODE_PMA_SERIAL_TX_IO_TO_RX_IO, + + /** + * Parallel loopback from the PMA receive lane data ports, to the + * transmit lane data ports + * CDR recovered bit clock used + */ + AL_SRDS_LB_MODE_PMA_PARALLEL_RX_TO_TX, + + /** Loops received data after elastic buffer to transmit path */ + AL_SRDS_LB_MODE_PCS_PIPE, + + /** Loops TX data (to PMA) to RX path (instead of PMA data) */ + AL_SRDS_LB_MODE_PCS_NEAR_END, + + /** Loops receive data prior to interface block to transmit path */ + AL_SRDS_LB_MODE_PCS_FAR_END, +}; + +/** Serdes BIST pattern */ +enum al_serdes_bist_pattern { + AL_SRDS_BIST_PATTERN_USER, + AL_SRDS_BIST_PATTERN_PRBS7, + AL_SRDS_BIST_PATTERN_PRBS23, + AL_SRDS_BIST_PATTERN_PRBS31, + AL_SRDS_BIST_PATTERN_CLK1010, +}; + +/** SerDes group rate */ +enum al_serdes_rate { + AL_SRDS_RATE_1_8, + AL_SRDS_RATE_1_4, + AL_SRDS_RATE_1_2, + AL_SRDS_RATE_FULL, +}; + +/** SerDes power mode */ +enum al_serdes_pm { + AL_SRDS_PM_PD, + AL_SRDS_PM_P2, + AL_SRDS_PM_P1, + AL_SRDS_PM_P0S, + AL_SRDS_PM_P0, +}; + +/** + * Initializes a SERDES object + * + * @param serdes_regs_base + * The SERDES register file base pointer + * + * @param obj + * An allocated, non initialized object context + * + * + * @return 0 if no error found. 
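+ *
+ * Example (a sketch; assumes 'serdes_base' is an already-mapped register
+ * file base):
+ * @code
+ * struct al_serdes_obj serdes;
+ * uint8_t val;
+ *
+ * al_serdes_handle_init(serdes_base, &serdes);
+ * al_serdes_reg_read(&serdes, AL_SRDS_GRP_A, AL_SRDS_REG_PAGE_4_COMMON,
+ *	AL_SRDS_REG_TYPE_PMA, 0, &val);
+ * @endcode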
+ * + */ +int al_serdes_handle_init( + void __iomem *serdes_regs_base, + struct al_serdes_obj *obj); + +/** + * SERDES register read + * + * Reads a SERDES register + * + * @param obj + * The object context + * + * @param grp + * The SERDES group + * + * @param page + * The SERDES register page within the group + * + * @param type + * The SERDES register type (PMA /PCS) + * + * @param offset + * The SERDES register offset (0 - 4095) + * + * @param data + * The read data + * + * + * @return 0 if no error found. + * + */ +int al_serdes_reg_read( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_reg_page page, + enum al_serdes_reg_type type, + uint16_t offset, + uint8_t *data); + +/** + * SERDES register write + * + * Writes a SERDES register + * + * @param obj + * The object context + * + * @param grp + * The SERDES group + * + * @param page + * The SERDES register page within the group + * + * @param type + * The SERDES register type (PMA /PCS) + * + * @param offset + * The SERDES register offset (0 - 4095) + * + * @param data + * The data to write + * + * + * @return 0 if no error found. + * + */ +int al_serdes_reg_write( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_reg_page page, + enum al_serdes_reg_type type, + uint16_t offset, + uint8_t data); + +/** + * Enable BIST required overrides + * + * @param obj + * The object context + * @param grp + * The SERDES group + * @param rate + * The required speed rate + */ +void al_serdes_bist_overrides_enable( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_rate rate); + +/** + * SERDES group power mode control + * + * @param obj + * The object context + * @param grp + * The SERDES group + * @param pm + * The required power mode + */ +void al_serdes_group_pm_set( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_pm pm); + +/** + * SERDES lane power mode control + * + * @param obj + * The object context + * @param grp + * The SERDES group + * @param lane + * The SERDES lane within the group + * @param rx_pm + * The required RX power mode + * @param tx_pm + * The required TX power mode + */ +void al_serdes_lane_pm_set( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + enum al_serdes_pm rx_pm, + enum al_serdes_pm tx_pm); + +/** + * SERDES group PMA hard reset + * + * Controls Serdes group PMA hard reset + * + * @param obj + * The object context + * + * @param grp + * The SERDES group + * + * @param enable + * Enable/disable hard reset + */ +void al_serdes_pma_hard_reset_group( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + al_bool enable); + +/** + * SERDES lane PMA hard reset + * + * Controls Serdes lane PMA hard reset + * + * @param obj + * The object context + * + * @param grp + * The SERDES group + * + * @param lane + * The SERDES lane within the group + * + * @param enable + * Enable/disable hard reset + */ +void al_serdes_pma_hard_reset_lane( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + al_bool enable); + +/** + * SERDES loopback control + * + * Controls the loopback + * + * @param obj + * The object context + * + * @param grp + * The SERDES group + * + * @param lane + * The SERDES lane within the group + * + * @param mode + * The requested loopback mode + * + */ +void al_serdes_loopback_control( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + enum al_serdes_lb_mode mode); + +/** + * SERDES BIST pattern selection + * 
+ * Selects the BIST pattern to be used + * + * @param obj + * The object context + * + * @param grp + * The SERDES group + * + * @param pattern + * The pattern to set + * + * @param user_data + * The pattern user data (when pattern == AL_SRDS_BIST_PATTERN_USER) + * 80 bits (8 bytes array) + * + */ +void al_serdes_bist_pattern_select( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_bist_pattern pattern, + uint8_t *user_data); + +/** + * SERDES BIST TX Enable + * + * Enables/disables TX BIST per lane + * + * @param obj + * The object context + * + * @param grp + * The SERDES group + * + * @param lane + * The SERDES lane within the group + * + * @param enable + * Enable or disable TX BIST + */ +void al_serdes_bist_tx_enable( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + al_bool enable); + +/** + * SERDES BIST TX single bit error injection + * + * Injects single bit error during a TX BIST + * + * @param obj + * The object context + * + * @param grp + * The SERDES group + */ +void al_serdes_bist_tx_err_inject( + struct al_serdes_obj *obj, + enum al_serdes_group grp); + +/** + * SERDES BIST RX Enable + * + * Enables/disables RX BIST per lane + * + * @param obj + * The object context + * + * @param grp + * The SERDES group + * + * @param lane + * The SERDES lane within the group + * + * @param enable + * Enable or disable TX BIST + */ +void al_serdes_bist_rx_enable( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + al_bool enable); + +/** + * SERDES BIST RX status + * + * Checks the RX BIST status for a specific SERDES lane + * + * @param obj + * The object context + * + * @param grp + * The SERDES group + * + * @param lane + * The SERDES lane within the group + * + * @param is_locked + * An indication whether RX BIST is locked + * + * @param err_cnt_overflow + * An indication whether error count overflow occured + * + * @param err_cnt + * Current bit error count + */ +void al_serdes_bist_rx_status( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + al_bool *is_locked, + al_bool *err_cnt_overflow, + uint16_t *err_cnt); + +/** + * SERDES Digital Test Bus + * + * Samples the digital test bus of a specific SERDES lane + * + * @param obj + * The object context + * + * @param grp + * The SERDES group + * + * @param lane + * The SERDES lane within the group + * + * @param sel + * The selected sampling group (0 - 31) + * + * @param sampled_data + * The sampled data (5 bytes array) + * + * + * @return 0 if no error found. + * + */ +int al_serdes_digital_test_bus( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + uint8_t sel, + uint8_t *sampled_data); + + +/* KR link training */ +/** + * Set the tx de-emphasis to preset values + * + * @param obj The object context + * + * @param grp The SERDES group + * + * @param lane The SERDES lane within the group + * + */ +void al_serdes_tx_deemph_preset( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane); + +/** + * Tx de-emphasis parameters + */ +enum al_serdes_tx_deemph_param { + AL_SERDES_TX_DEEMP_C_ZERO, /*< c(0) */ + AL_SERDES_TX_DEEMP_C_PLUS, /*< c(1) */ + AL_SERDES_TX_DEEMP_C_MINUS, /*< c(-1) */ +}; + +/** + * Increase tx de-emphasis param. 
+ * + * @param obj The object context + * + * @param grp The SERDES group + * + * @param lane The SERDES lane within the group + * + * @param param which tx de-emphasis to change + * + * @return false in case max is reached. true otherwise. + */ +al_bool al_serdes_tx_deemph_inc( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + enum al_serdes_tx_deemph_param param); + +/** + * Decrease tx de-emphasis param. + * + * @param obj The object context + * + * @param grp The SERDES group + * + * @param lane The SERDES lane within the group + * + * @param param which tx de-emphasis to change + * + * @return false in case min is reached. true otherwise. + */ +al_bool al_serdes_tx_deemph_dec( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + enum al_serdes_tx_deemph_param param); + +/** + * run Rx eye measurement. + * + * @param obj The object context + * + * @param grp The SERDES group + * + * @param lane The SERDES lane within the group + * + * @param timeout timeout in uSec + * + * @param value Rx eye measurement value + * (0 - completely closed eye, 0xffff - completely open eye). + * + * @return 0 if no error found. + */ +int al_serdes_eye_measure_run( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + uint32_t timeout, + unsigned int *value); + +/** + * Eye diagram single sampling + * + * @param obj The object context + * + * @param grp The SERDES group + * + * @param lane The SERDES lane within the group + * + * @param x Sampling X position (0 - 63 --> -1.00 UI ... 1.00 UI) + * + * @param y Sampling Y position (0 - 62 --> 500mV ... -500mV) + * + * @param timeout timeout in uSec + * + * @param value Eye diagram sample value (BER - 0x0000 - 0xffff) + * + * @return 0 if no error found. + */ +int al_serdes_eye_diag_sample( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + unsigned int x, + int y, + unsigned int timeout, + unsigned int *value); + +/** + * Check if signal is detected + * + * @param obj The object context + * + * @param grp The SERDES group + * + * @param lane The SERDES lane within the group + * + * @return true if signal is detected. false otherwise. + */ +al_bool al_serdes_signal_is_detected( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane); + + +struct al_serdes_adv_tx_params { + /* + * select the input values location. + * When set to true the values will be taken from the internal registers + * that will be override with the next following parameters. + * When set to false the values will be taken from external pins (the + * other parameters in this case is not needed) + */ + al_bool override; + /* + * Transmit Amplitude control signal. Used to define the full-scale + * maximum swing of the driver. + * 000 - Not Supported + * 001 - 952mVdiff-pkpk + * 010 - 1024mVdiff-pkpk + * 011 - 1094mVdiff-pkpk + * 100 - 1163mVdiff-pkpk + * 101 - 1227mVdiff-pkpk + * 110 - 1283mVdiff-pkpk + * 111 - 1331mVdiff-pkpk + */ + uint8_t amp; + /* Defines the total number of driver units allocated in the driver */ + uint8_t total_driver_units; + /* Defines the total number of driver units allocated to the + * first post-cursor (C+1) tap. */ + uint8_t c_plus_1; + /* Defines the total number of driver units allocated to the + * second post-cursor (C+2) tap. */ + uint8_t c_plus_2; + /* Defines the total number of driver units allocated to the + * first pre-cursor (C-1) tap. 
*/ + uint8_t c_minus_1; + /* TX driver Slew Rate control: + * 00 - 31ps + * 01 - 33ps + * 10 - 68ps + * 11 - 170ps + */ + uint8_t slew_rate; +}; + +struct al_serdes_adv_rx_params { + /* + * select the input values location. + * When set to true the values will be taken from the internal registers + * that will be override with the next following parameters. + * When set to false the values will be taken based in the equalization + * results (the other parameters in this case is not needed) + */ + al_bool override; + /* RX agc high frequency dc gain: + * -3'b000: -3dB + * -3'b001: -2.5dB + * -3'b010: -2dB + * -3'b011: -1.5dB + * -3'b100: -1dB + * -3'b101: -0.5dB + * -3'b110: -0dB + * -3'b111: 0.5dB + */ + uint8_t dcgain; + /* DFE post-shaping tap 3dB frequency + * -3'b000: 684MHz + * -3'b001: 576MHz + * -3'b010: 514MHz + * -3'b011: 435MHz + * -3'b100: 354MHz + * -3'b101: 281MHz + * -3'b110: 199MHz + * -3'b111: 125MHz + */ + uint8_t dfe_3db_freq; + /* DFE post-shaping tap gain + * 0: no pulse shaping tap + * 1: -24mVpeak + * 2: -45mVpeak + * 3: -64mVpeak + * 4: -80mVpeak + * 5: -93mVpeak + * 6: -101mVpeak + * 7: -105mVpeak + */ + uint8_t dfe_gain; + /* DFE first tap gain control + * -4'b0000: +1mVpeak + * -4'b0001: +10mVpeak + * .... + * -4'b0110: +55mVpeak + * -4'b0111: +64mVpeak + * -4'b1000: -1mVpeak + * -4'b1001: -10mVpeak + * .... + * -4'b1110: -55mVpeak + * -4'b1111: -64mVpeak + */ + uint8_t dfe_first_tap_ctrl; + /* DFE second tap gain control + * -4'b0000: +0mVpeak + * -4'b0001: +9mVpeak + * .... + * -4'b0110: +46mVpeak + * -4'b0111: +53mVpeak + * -4'b1000: -0mVpeak + * -4'b1001: -9mVpeak + * .... + * -4'b1110: -46mVpeak + * -4'b1111: -53mVpeak + */ + uint8_t dfe_secound_tap_ctrl; + /* DFE third tap gain control + * -4'b0000: +0mVpeak + * -4'b0001: +7mVpeak + * .... + * -4'b0110: +38mVpeak + * -4'b0111: +44mVpeak + * -4'b1000: -0mVpeak + * -4'b1001: -7mVpeak + * .... + * -4'b1110: -38mVpeak + * -4'b1111: -44mVpeak + */ + uint8_t dfe_third_tap_ctrl; + /* DFE fourth tap gain control + * -4'b0000: +0mVpeak + * -4'b0001: +6mVpeak + * .... + * -4'b0110: +29mVpeak + * -4'b0111: +33mVpeak + * -4'b1000: -0mVpeak + * -4'b1001: -6mVpeak + * .... 
+ * -4'b1110: -29mVpeak + * -4'b1111: -33mVpeak + */ + uint8_t dfe_fourth_tap_ctrl; + /* Low frequency agc gain (att) select + * -3'b000: Disconnected + * -3'b001: -18.5dB + * -3'b010: -12.5dB + * -3'b011: -9dB + * -3'b100: -6.5dB + * -3'b101: -4.5dB + * -3'b110: -2.9dB + * -3'b111: -1.6dB + */ + uint8_t low_freq_agc_gain; + /* Provides a RX Equalizer pre-hint, prior to beginning + * adaptive equalization */ + uint8_t precal_code_sel; + /* High frequency agc boost control + * Min d0: Boost ~4dB + * Max d31: Boost ~20dB + */ + uint8_t high_freq_agc_boost; +}; + +/** + * configure tx advanced parameters + * + * @param obj The object context + * + * @param grp The SERDES group + * + * @param lane The SERDES lane within the group + * + * @param params pointer to the tx parameters + */ +void al_serdes_tx_advanced_params_set(struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + struct al_serdes_adv_tx_params *params); + +/** + * read tx advanced parameters + * + * @param obj The object context + * + * @param grp The SERDES group + * + * @param lane The SERDES lane within the group + * + * @param params pointer to the tx parameters + */ +void al_serdes_tx_advanced_params_get(struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + struct al_serdes_adv_tx_params *params); + +/** + * configure rx advanced parameters + * + * @param obj The object context + * + * @param grp The SERDES group + * + * @param lane The SERDES lane within the group + * + * @param params pointer to the rx parameters + */ +void al_serdes_rx_advanced_params_set(struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + struct al_serdes_adv_rx_params *params); + +/** + * read rx advanced parameters + * + * @param obj The object context + * + * @param grp The SERDES group + * + * @param lane The SERDES lane within the group + * + * @param params pointer to the rx parameters + */ +void al_serdes_rx_advanced_params_get(struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + struct al_serdes_adv_rx_params* params); + +/** + * Switch entire SerDes group to SGMII mode based on 156.25 Mhz reference clock + * + * @param obj The object context + * + * @param grp The SERDES group + */ +void al_serdes_mode_set_sgmii( + struct al_serdes_obj *obj, + enum al_serdes_group grp); + +/** + * Switch entire SerDes group to KR mode based on 156.25 Mhz reference clock + * + * @param obj The object context + * + * @param grp The SERDES group + */ +void al_serdes_mode_set_kr( + struct al_serdes_obj *obj, + enum al_serdes_group grp); + +/** + * performs SerDes HW equalization test and update equalization parameters + * + * @param obj the object context + * + * @param grp the SERDES group + * + * @param lane The SERDES lane within the group + */ +int al_serdes_rx_equalization( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane); + +/** + * performs Rx equalization and compute the width and height of the eye + * + * @param obj the object context + * + * @param grp the SERDES group + * + * @param lane The SERDES lane within the group + * + * @param width the output width of the eye + * + * @param height the output height of the eye + */ +int al_serdes_calc_eye_size( + struct al_serdes_obj *obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + int* width, + int* height); + +/* *INDENT-OFF* */ +#ifdef __cplusplus +} +#endif + +/* *INDENT-ON* */ +#endif /* __AL_SRDS__ */ + +/** @} end of SERDES group */ 
+ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_serdes_internal_regs.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_serdes_internal_regs.h new file mode 100644 index 00000000000000..ff9bef67c95e36 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_serdes_internal_regs.h @@ -0,0 +1,652 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*******************************************************************************/ +#ifndef __AL_SERDES_INTERNAL_REGS_H__ +#define __AL_SERDES_INTERNAL_REGS_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************************************************* + * Per lane register fields + ******************************************************************************/ +/* + * RX and TX lane hard reset + * 0 - Hard reset is asserted + * 1 - Hard reset is de-asserted + */ +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_REG_NUM 2 +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_MASK 0x01 +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_ASSERT 0x00 +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_DEASSERT 0x01 + +/* + * RX and TX lane hard reset control + * 0 - Hard reset is taken from the interface pins + * 1 - Hard reset is taken from registers + */ +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_REG_NUM 2 +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_MASK 0x02 +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_VAL_IFACE 0x00 +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_VAL_REGS 0x02 + +/* RX lane power state control */ +#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_REG_NUM 3 +#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_MASK 0x1f +#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_PD 0x01 +#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P2 0x02 +#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P1 0x04 +#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0S 0x08 +#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0 0x10 + +/* TX lane power state control */ +#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_REG_NUM 4 +#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_MASK 0x1f +#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_PD 0x01 +#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P2 0x02 +#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P1 0x04 +#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0S 0x08 +#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0 0x10 + +/* RX lane word width */ +#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_REG_NUM 5 +#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_MASK 0x07 +#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_8 0x00 +#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_10 0x01 +#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_16 0x02 +#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_20 0x03 +#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_32 0x04 +#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_40 0x05 + +/* TX lane word width */ +#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_REG_NUM 5 +#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_MASK 0x70 +#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_8 0x00 +#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_10 0x10 +#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_16 0x20 +#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_20 0x30 +#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_32 0x40 +#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_40 0x50 + +/* RX lane rate select */ +#define SERDES_IREG_FLD_PCSRX_DIVRATE_REG_NUM 6 +#define SERDES_IREG_FLD_PCSRX_DIVRATE_MASK 0x07 +#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_8 0x00 +#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_4 0x01 +#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_2 0x02 +#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_1 0x03 + +/* TX lane rate select */ +#define SERDES_IREG_FLD_PCSTX_DIVRATE_REG_NUM 6 +#define SERDES_IREG_FLD_PCSTX_DIVRATE_MASK 0x70 +#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_8 0x00 +#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_4 0x10 +#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_2 0x20 +#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_1 0x30 + +/* + * PMA 
serial RX-to-TX loop-back enable (from AGC to IO Driver). Serial receive
+ * to transmit loopback: 0 - Disables loopback 1 - Transmits the untimed,
+ * partially equalized RX signal out of the transmit IO pins
+ */
+#define SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN 0x10
+
+/*
+ * PMA TX-to-RX buffered serial loop-back enable (bypasses IO Driver). Serial
+ * transmit to receive buffered loopback: 0 - Disables loopback 1 - Loops back
+ * the TX serializer output into the CDR
+ */
+#define SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN 0x20
+
+/*
+ * PMA TX-to-RX I/O serial loop-back enable (loop back done directly from TX to
+ * RX pads). Serial IO loopback from the transmit lane IO pins to the receive
+ * lane IO pins: 0 - Disables loopback 1 - Loops back the driver IO signal to
+ * the RX IO pins
+ */
+#define SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN 0x40
+
+/*
+ * PMA Parallel RX-to-TX loop-back enable. Parallel loopback from the PMA
+ * receive lane 20-bit data ports, to the transmit lane 20-bit data ports 0 -
+ * Disables loopback 1 - Loops back the 20-bit receive data port to the
+ * transmitter
+ */
+#define SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN 0x80
+
+/*
+ * PMA CDR recovered-clock loopback enable; asserted when PARRX2TXTIMEDEN is 1.
+ * Transmit bit clock select: 0 - Selects synthesizer bit clock for transmit 1
+ * - Selects CDR clock for transmit
+ */
+#define SERDES_IREG_FLD_LB_CDRCLK2TXEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_CDRCLK2TXEN 0x01
+
+/* Receive lane BIST enable. Active High */
+#define SERDES_IREG_FLD_PCSRXBIST_EN_REG_NUM 8
+#define SERDES_IREG_FLD_PCSRXBIST_EN 0x01
+
+/* TX lane BIST enable. Active High */
+#define SERDES_IREG_FLD_PCSTXBIST_EN_REG_NUM 8
+#define SERDES_IREG_FLD_PCSTXBIST_EN 0x02
+
+/*
+ * RX BIST completion signal 0 - Indicates test is not completed 1 - Indicates
+ * the test has completed, and will remain high until a new test is initiated
+ */
+#define SERDES_IREG_FLD_RXBIST_DONE_REG_NUM 8
+#define SERDES_IREG_FLD_RXBIST_DONE 0x04
+
+/*
+ * RX BIST error count overflow indicator. Indicates an overflow in the number
+ * of byte errors identified during the course of the test. This word is stable
+ * to sample when the *_DONE_* signal has asserted
+ */
+#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW_REG_NUM 8
+#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW 0x08
+
+/*
+ * RX BIST locked indicator 0 - Indicates BIST is not word locked and error
+ * comparisons have not begun yet 1 - Indicates BIST is word locked and error
+ * comparisons have begun
+ */
+#define SERDES_IREG_FLD_RXBIST_RXLOCKED_REG_NUM 8
+#define SERDES_IREG_FLD_RXBIST_RXLOCKED 0x10
+
+/*
+ * RX BIST error count word. Indicates the number of byte errors identified
+ * during the course of the test. This word is stable to sample when the
+ * *_DONE_* signal has asserted.
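+ *
+ * Illustrative readout (a sketch only; lane_reg_read() stands for an
+ * assumed per-lane indirect read helper over the SERDES reg_addr/reg_data
+ * register file interface, not an API defined in this file):
+ *
+ *	uint8_t stat;
+ *	uint16_t errs;
+ *
+ *	stat = lane_reg_read(lane, SERDES_IREG_FLD_RXBIST_DONE_REG_NUM);
+ *	if ((stat & SERDES_IREG_FLD_RXBIST_DONE) &&
+ *	    !(stat & SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW)) {
+ *		errs = lane_reg_read(lane,
+ *			SERDES_IREG_FLD_RXBIST_ERRCOUNT_MSB_REG_NUM) << 8;
+ *		errs |= lane_reg_read(lane,
+ *			SERDES_IREG_FLD_RXBIST_ERRCOUNT_LSB_REG_NUM);
+ *	}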
+ */
+#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_MSB_REG_NUM 9
+#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_LSB_REG_NUM 10
+
+/* Tx params */
+#define SERDES_IREG_TX_DRV_1_REG_NUM 21
+#define SERDES_IREG_TX_DRV_1_HLEV_MASK 0x7
+#define SERDES_IREG_TX_DRV_1_HLEV_SHIFT 0
+#define SERDES_IREG_TX_DRV_1_LEVN_MASK 0xf8
+#define SERDES_IREG_TX_DRV_1_LEVN_SHIFT 3
+
+#define SERDES_IREG_TX_DRV_2_REG_NUM 22
+#define SERDES_IREG_TX_DRV_2_LEVNM1_MASK 0xf
+#define SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT 0
+#define SERDES_IREG_TX_DRV_2_LEVNM2_MASK 0x30
+#define SERDES_IREG_TX_DRV_2_LEVNM2_SHIFT 4
+
+#define SERDES_IREG_TX_DRV_3_REG_NUM 23
+#define SERDES_IREG_TX_DRV_3_LEVNP1_MASK 0x7
+#define SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT 0
+#define SERDES_IREG_TX_DRV_3_SLEW_MASK 0x18
+#define SERDES_IREG_TX_DRV_3_SLEW_SHIFT 3
+
+/* Rx params */
+#define SERDES_IREG_RX_CALEQ_1_REG_NUM 24
+#define SERDES_IREG_RX_CALEQ_1_DCGAIN_MASK 0x7
+#define SERDES_IREG_RX_CALEQ_1_DCGAIN_SHIFT 0
+/* DFE post-shaping tap 3dB frequency */
+#define SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_MASK 0x38
+#define SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_SHIFT 3
+
+#define SERDES_IREG_RX_CALEQ_2_REG_NUM 25
+/* DFE post-shaping tap gain */
+#define SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_MASK 0x7
+#define SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_SHIFT 0
+/* DFE first tap gain control */
+#define SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_MASK 0x78
+#define SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_SHIFT 3
+
+#define SERDES_IREG_RX_CALEQ_3_REG_NUM 26
+#define SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_MASK 0xf
+#define SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_SHIFT 0
+#define SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_MASK 0xf0
+#define SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_SHIFT 4
+
+#define SERDES_IREG_RX_CALEQ_4_REG_NUM 27
+#define SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_MASK 0xf
+#define SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_SHIFT 0
+#define SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_MASK 0x70
+#define SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_SHIFT 4
+
+#define SERDES_IREG_RX_CALEQ_5_REG_NUM 28
+#define SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_MASK 0x7
+#define SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_SHIFT 0
+#define SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_MASK 0xf8
+#define SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_SHIFT 3
+
+/* RX lane best eye point measurement result */
+#define SERDES_IREG_RXEQ_BEST_EYE_MSB_VAL_REG_NUM 29
+#define SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_REG_NUM 30
+#define SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_MASK 0x3F
+
+/*
+ * Adaptive RX Equalization enable
+ * 0 - Disables adaptive RX equalization.
+ * 1 - Enables adaptive RX equalization.
+ */
+#define SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM 31
+#define SERDES_IREG_FLD_PCSRXEQ_START (1 << 0)
+
+/*
+ * Enables an eye diagram measurement
+ * within the PHY.
+ * 0 - Disables eye diagram measurement
+ * 1 - Enables eye diagram measurement
+ */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM 31
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START (1 << 1)
+
+
+/*
+ * RX lane single roam eye point measurement start signal.
+ * If asserted, a single measurement at a fixed XADJUST and YADJUST is started.
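+ *
+ * A sketch of one roam measurement (illustrative only; lane_reg_read()/
+ * lane_reg_write() are assumed indirect access helpers, the relevant
+ * LOCWREN override-enable bits are presumed already cleared, and a real
+ * driver would read-modify-write register 31 rather than overwrite it):
+ *
+ *	lane_reg_write(lane, SERDES_IREG_FLD_RXCALROAMXADJUST_REG_NUM, x);
+ *	lane_reg_write(lane, SERDES_IREG_FLD_RXCALROAMYADJUST_REG_NUM, y);
+ *	lane_reg_write(lane, SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_REG_NUM,
+ *		       SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START);
+ *	while (!(lane_reg_read(lane,
+ *			SERDES_IREG_FLD_RXCALROAMEYEMEASDONE_REG_NUM) &
+ *		 SERDES_IREG_FLD_RXCALROAMEYEMEASDONE))
+ *		;
+ *	acc = (lane_reg_read(lane,
+ *			SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_MSB_REG_NUM) << 8) |
+ *	      lane_reg_read(lane,
+ *			SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_LSB_REG_NUM);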
+ */
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_REG_NUM 31
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START (1 << 2)
+
+
+/*
+ * PHY Eye diagram measurement status
+ * signal
+ * 0 - Indicates eye diagram results are not
+ * valid for sampling
+ * 1 - Indicates eye diagram is complete and
+ * results are valid for sampling
+ */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE_REG_NUM 32
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE (1 << 0)
+
+/*
+ * Eye diagram error signal. Indicates if the
+ * measurement was invalid because the eye
+ * diagram was interrupted by the link entering
+ * electrical idle.
+ * 0 - Indicates eye diagram is valid
+ * 1 - Indicates an error occurred, and the eye
+ * diagram measurement should be re-run
+ */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR_REG_NUM 32
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR (1 << 1)
+
+/*
+ * RX lane single roam eye point measurement status
+ * 0 - Indicates the roam eye measurement results are not valid for sampling
+ * 1 - Indicates the roam eye measurement is complete and results are valid
+ * for sampling
+ */
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASDONE_REG_NUM 32
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASDONE (1 << 2)
+
+/*
+ * PHY Adaptive Equalization Status Signal
+ * 0 - Indicates adaptive equalization results
+ * are not valid for sampling
+ * 1 - Indicates adaptive equalization is
+ * complete and results are valid for sampling.
+ */
+#define SERDES_IREG_FLD_RXEQ_DONE_REG_NUM 32
+#define SERDES_IREG_FLD_RXEQ_DONE (1 << 3)
+
+
+/*
+ * 7-bit eye diagram time adjust control
+ * - 6-bits per UI
+ * - spans 2 UI
+ */
+#define SERDES_IREG_FLD_RXCALROAMXADJUST_REG_NUM 33
+
+/* 6-bit eye diagram voltage adjust control - spans +/-300mVdiff */
+#define SERDES_IREG_FLD_RXCALROAMYADJUST_REG_NUM 34
+
+/*
+ * Eye diagram status signal. Safe for
+ * sampling when the *DONE* signal has
+ * asserted
+ * 14'h0000 - Completely Closed Eye
+ * 14'h3FFF - Completely Open Eye
+ */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_REG_NUM 35
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_MAKE 0xFF
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_SHIFT 0
+
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_REG_NUM 36
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_MAKE 0x3F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_SHIFT 0
+
+/*
+ * RX lane single roam eye point measurement result.
+ * If 0, eye is open at current XADJUST and YADJUST settings.
+ */
+#define SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_MSB_REG_NUM 37
+#define SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_LSB_REG_NUM 38
+
+/*
+ * Override enable for CDR lock to reference clock
+ * 0 - CDR is always locked to reference
+ * 1 - CDR operation mode (Lock2Reference or Lock2data are controlled
+ * internally depending on the incoming signal and ppm status)
+ */
+#define SERDES_IREG_FLD_RXLOCK2REF_OVREN_REG_NUM 39
+#define SERDES_IREG_FLD_RXLOCK2REF_OVREN (1 << 1)
+
+/*
+ * Selects Eye to capture based on edge
+ * 0 - Capture 1st Eye in Eye Diagram
+ * 1 - Capture 2nd Eye in Eye Diagram measurement
+ */
+#define SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM 39
+#define SERDES_IREG_FLD_RXROAM_XORBITSEL (1 << 2)
+#define SERDES_IREG_FLD_RXROAM_XORBITSEL_MASK (1 << 2)
+#define SERDES_IREG_FLD_RXROAM_XORBITSEL_1ST 0
+#define SERDES_IREG_FLD_RXROAM_XORBITSEL_2ND (1 << 2)
+
+/*
+ * RX Signal detect. 0 indicates no signal, 1 indicates signal detected.
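+ *
+ * For example (sketch; same assumed lane_reg_read() helper as above):
+ *
+ *	sig = !!(lane_reg_read(lane, SERDES_IREG_FLD_RXRANDET_REG_NUM) &
+ *		 SERDES_IREG_FLD_RXRANDET_STAT);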
+ */ +#define SERDES_IREG_FLD_RXRANDET_REG_NUM 41 +#define SERDES_IREG_FLD_RXRANDET_STAT 0x20 + +/* + * RX data polarity inversion control: + * 1'b0: no inversion + * 1'b1: invert polarity + */ +#define SERDES_IREG_FLD_POLARITY_RX_REG_NUM 46 +#define SERDES_IREG_FLD_POLARITY_RX_INV (1 << 0) + +/* + * TX data polarity inversion control: + * 1'b0: no inversion + * 1'b1: invert polarity + */ +#define SERDES_IREG_FLD_POLARITY_TX_REG_NUM 46 +#define SERDES_IREG_FLD_POLARITY_TX_INV (1 << 1) + +/* LANEPCSPSTATE* override enable (Active low) */ +#define SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM 85 +#define SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN (1 << 0) + +/* LB* override enable (Active low) */ +#define SERDES_IREG_FLD_LB_LOCWREN_REG_NUM 85 +#define SERDES_IREG_FLD_LB_LOCWREN (1 << 1) + +/* PCSRX* override enable (Active low) */ +#define SERDES_IREG_FLD_PCSRX_LOCWREN_REG_NUM 85 +#define SERDES_IREG_FLD_PCSRX_LOCWREN (1 << 4) + +/* PCSRXBIST* override enable (Active low) */ +#define SERDES_IREG_FLD_PCSRXBIST_LOCWREN_REG_NUM 85 +#define SERDES_IREG_FLD_PCSRXBIST_LOCWREN (1 << 5) + +/* PCSRXEQ* override enable (Active low) */ +#define SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM 85 +#define SERDES_IREG_FLD_PCSRXEQ_LOCWREN (1 << 6) + +/* PCSTX* override enable (Active low) */ +#define SERDES_IREG_FLD_PCSTX_LOCWREN_REG_NUM 85 +#define SERDES_IREG_FLD_PCSTX_LOCWREN (1 << 7) + +/* + * group registers: + * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN, + * SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN + * SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN + */ +#define SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM 86 + +/* PCSTXBIST* override enable (Active low) */ +#define SERDES_IREG_FLD_PCSTXBIST_LOCWREN_REG_NUM 86 +#define SERDES_IREG_FLD_PCSTXBIST_LOCWREN (1 << 0) + +/* Override RX_CALCEQ through the internal registers (Active low) */ +#define SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN_REG_NUM 86 +#define SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN (1 << 3) + +#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN_REG_NUM 86 +#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN (1 << 4) + + +/* RXCALROAMEYEMEASIN* override enable - Active Low */ +#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM 86 +#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN (1 << 6) + +/* RXCALROAMXADJUST* override enable - Active Low */ +#define SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM 86 +#define SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN (1 << 7) + +/* RXCALROAMYADJUST* override enable - Active Low */ +#define SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM 87 +#define SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN (1 << 0) + +/* RXCDRCALFOSC* override enable. Active Low */ +#define SERDES_IREG_FLD_RXCDRCALFOSC_LOCWREN_REG_NUM 87 +#define SERDES_IREG_FLD_RXCDRCALFOSC_LOCWREN (1 << 1) + +/* Over-write enable for RXEYEDIAGFSM_INITXVAL */ +#define SERDES_IREG_FLD_RXEYEDIAGFSM_LOCWREN_REG_NUM 87 +#define SERDES_IREG_FLD_RXEYEDIAGFSM_LOCWREN (1 << 2) + +/* Over-write enable for CMNCLKGENMUXSEL_TXINTERNAL */ +#define SERDES_IREG_FLD_RXTERMHIZ_LOCWREN_REG_NUM 87 +#define SERDES_IREG_FLD_RXTERMHIZ_LOCWREN (1 << 3) + +/* TXCALTCLKDUTY* override enable. 
Active Low */ +#define SERDES_IREG_FLD_TXCALTCLKDUTY_LOCWREN_REG_NUM 87 +#define SERDES_IREG_FLD_TXCALTCLKDUTY_LOCWREN (1 << 4) + +/* Override TX_DRV through the internal registers (Active low) */ +#define SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM 87 +#define SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN (1 << 5) + +/******************************************************************************* + * Common lane register fields + ******************************************************************************/ +/* + * Common lane hard reset control + * 0 - Hard reset is taken from the interface pins + * 1 - Hard reset is taken from registers + */ +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_REG_NUM 2 +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_MASK 0x01 +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_VAL_IFACE 0x00 +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_VAL_REGS 0x01 + +/* + * Common lane hard reset + * 0 - Hard reset is asserted + * 1 - Hard reset is de-asserted + */ +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_REG_NUM 2 +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_MASK 0x02 +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_ASSERT 0x00 +#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_DEASSERT 0x02 + +/* Synth power state control */ +#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_REG_NUM 3 +#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_MASK 0x1f +#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_PD 0x01 +#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P2 0x02 +#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P1 0x04 +#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0S 0x08 +#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0 0x10 + +/* Transmit datapath FIFO enable (Active High) */ +#define SERDES_IREG_FLD_CMNPCS_TXENABLE_REG_NUM 8 +#define SERDES_IREG_FLD_CMNPCS_TXENABLE (1 << 2) + +#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_REG_NUM 30 +#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_MASK 0x7f +#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_SHIFT 0 + +#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_REG_NUM 31 +#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_MASK 0x7f +#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_SHIFT 0 + +#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_REG_NUM 32 +#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_MASK 0xff +#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_SHIFT 0 + +#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_REG_NUM 33 +#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_MASK 0x1 +#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_SHIFT 0 + +#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_REG_NUM 33 +#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_MASK 0x3e +#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_SHIFT 1 + +#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_REG_NUM 34 +#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_MASK 0xff +#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_SHIFT 0 + +#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_REG_NUM 35 +#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_MASK 0x1 +#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_SHIFT 0 + +#define SERDES_IREG_FLD_RXEQ_FINE_STEP_REG_NUM 35 +#define SERDES_IREG_FLD_RXEQ_FINE_STEP_MASK 0x3e +#define SERDES_IREG_FLD_RXEQ_FINE_STEP_SHIFT 1 + +#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_REG_NUM 36 +#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_MASK 0xff +#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_SHIFT 0 + +#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_REG_NUM 37 +#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_MASK 0x7 +#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_SHIFT 0 + +#define 
SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_REG_NUM 43
+#define SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_MASK 0x7
+#define SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_SHIFT 0
+
+/*
+ * Selects the transmit BIST mode:
+ * 0 - Uses the 80-bit internal memory pattern (w/ OOB)
+ * 1 - Uses a 2^7 PRBS pattern
+ * 2 - Uses a 2^23 PRBS pattern
+ * 3 - Uses a 2^31 PRBS pattern
+ * 4 - Uses a 1010 clock pattern
+ * 5 and above - Reserved
+ */
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_REG_NUM 80
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_MASK 0x07
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_USER 0x00
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS7 0x01
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS23 0x02
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS31 0x03
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_CLK1010 0x04
+
+/* Single-Bit error injection enable (on posedge) */
+#define SERDES_IREG_FLD_TXBIST_BITERROR_EN_REG_NUM 80
+#define SERDES_IREG_FLD_TXBIST_BITERROR_EN 0x20
+
+/* CMNPCIEGEN3* override enable (Active Low) */
+#define SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM 95
+#define SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN (1 << 2)
+
+/* CMNPCS* override enable (Active Low) */
+#define SERDES_IREG_FLD_CMNPCS_LOCWREN_REG_NUM 95
+#define SERDES_IREG_FLD_CMNPCS_LOCWREN (1 << 3)
+
+/* CMNPCSBIST* override enable (Active Low) */
+#define SERDES_IREG_FLD_CMNPCSBIST_LOCWREN_REG_NUM 95
+#define SERDES_IREG_FLD_CMNPCSBIST_LOCWREN (1 << 4)
+
+/* CMNPCSPSTATE* override enable (Active Low) */
+#define SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN_REG_NUM 95
+#define SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN (1 << 5)
+
+/* PCS_EN* override enable (Active Low) */
+#define SERDES_IREG_FLD_PCS_LOCWREN_REG_NUM 96
+#define SERDES_IREG_FLD_PCS_LOCWREN (1 << 3)
+
+/* Eye diagram sample count */
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM 150
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_MASK 0xff
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_SHIFT 0
+
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM 151
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_MASK 0xff
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_SHIFT 0
+
+/* override control */
+#define SERDES_IREG_FLD_RXLOCK2REF_LOCWREN_REG_NUM 230
+#define SERDES_IREG_FLD_RXLOCK2REF_LOCWREN (1 << 0)
+
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_REG_NUM 623
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_MASK 0xff
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_SHIFT 0
+
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_REG_NUM 624
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_MASK 0xff
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_SHIFT 0
+
+/* X and Y coefficient return value */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM 626
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALWEIGHT_MASK 0x0F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALWEIGHT_SHIFT 0
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALWEIGHT_MASK 0xF0
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALWEIGHT_SHIFT 4
+
+/* X coarse scan step */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM 627
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_MASK 0x7F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_SHIFT 0
+
+/* X fine scan step */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM 628
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_MASK 0x7F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_SHIFT 0
+
+/* Y coarse scan step */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM 629
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_MASK 0x0F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_SHIFT 0
+
+/* Y fine scan step */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM 630
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_MASK 0x0F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_SHIFT 0
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_SERDES_INTERNAL_REGS_H__ */
+
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_serdes_regs.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_serdes_regs.h
new file mode 100644
index 00000000000000..0a485bbbc16e29
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_serdes_regs.h
@@ -0,0 +1,452 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +*******************************************************************************/ + + +#ifndef __AL_serdes_REG_H +#define __AL_serdes_REG_H + +#ifdef __cplusplus +extern "C" { +#endif +/* +* Unit Registers +*/ + +struct serdes_intc { + uint32_t rsrvd[0x100 / sizeof(uint32_t)]; +}; + +struct serdes_gen { + uint32_t version; /* SERDES registers Version */ + uint32_t rsrvd1[0x0c / sizeof(uint32_t)]; + uint32_t reg_addr; /* SERDES register file address */ + uint32_t reg_data; /* SERDES register file data */ + uint32_t rsrvd2[0x08 / sizeof(uint32_t)]; + uint32_t ictl_multi_bist; /* SERDES control */ + uint32_t ictl_pcs; /* SERDES control */ + uint32_t ictl_pma; /* SERDES control */ + uint32_t rsrvd3; + uint32_t ipd_multi_synth; /* SERDES control */ + uint32_t irst; /* SERDES control */ + uint32_t octl_multi_synthready; /* SERDES control */ + uint32_t octl_multi_synthstatus; /* SERDES control */ + uint32_t clk_out; /* SERDES control */ + uint32_t rsrvd[47]; +}; +struct serdes_lane { + uint32_t rsrvd1[0x10 / sizeof(uint32_t)]; + uint32_t octl_pma; /* SERDES status */ + uint32_t ictl_multi_andme; /* SERDES control */ + uint32_t ictl_multi_lb; /* SERDES control */ + uint32_t ictl_multi_rxbist; /* SERDES control */ + uint32_t ictl_multi_txbist; /* SERDES control */ + uint32_t ictl_multi; /* SERDES control */ + uint32_t ictl_multi_rxeq; /* SERDES control */ + uint32_t ictl_multi_rxeq_l_low; /* SERDES control */ + uint32_t ictl_multi_rxeq_l_high; /* SERDES control */ + uint32_t ictl_multi_rxeyediag; /* SERDES control */ + uint32_t ictl_multi_txdeemph; /* SERDES control */ + uint32_t ictl_multi_txmargin; /* SERDES control */ + uint32_t ictl_multi_txswing; /* SERDES control */ + uint32_t idat_multi; /* SERDES control */ + uint32_t ipd_multi; /* SERDES control */ + uint32_t octl_multi_rxbist; /* SERDES control */ + uint32_t octl_multi; /* SERDES control */ + uint32_t octl_multi_rxeyediag; /* SERDES control */ + uint32_t odat_multi_rxbist; /* SERDES control */ + uint32_t odat_multi_rxeq; /* SERDES control */ + uint32_t multi_rx_dvalid; /* SERDES control */ + uint32_t reserved; /* SERDES control */ + uint32_t rsrvd[6]; +}; + +struct al_serdes_regs { + struct serdes_intc intc; + struct serdes_gen gen; + struct serdes_lane lane[4]; +}; + + +/* +* Registers Fields +*/ + + +/**** version register ****/ +/* Revision number (Minor) */ +#define SERDES_GEN_VERSION_RELEASE_NUM_MINOR_MASK 0x000000FF +#define SERDES_GEN_VERSION_RELEASE_NUM_MINOR_SHIFT 0 +/* Revision number (Major) */ +#define SERDES_GEN_VERSION_RELEASE_NUM_MAJOR_MASK 0x0000FF00 +#define SERDES_GEN_VERSION_RELEASE_NUM_MAJOR_SHIFT 8 +/* date of release */ +#define SERDES_GEN_VERSION_DATE_DAY_MASK 0x001F0000 +#define SERDES_GEN_VERSION_DATE_DAY_SHIFT 16 +/* month of release */ +#define SERDES_GEN_VERSION_DATA_MONTH_MASK 0x01E00000 +#define SERDES_GEN_VERSION_DATA_MONTH_SHIFT 21 +/* year of release (starting from 2000) */ +#define SERDES_GEN_VERSION_DATE_YEAR_MASK 0x3E000000 +#define SERDES_GEN_VERSION_DATE_YEAR_SHIFT 25 +/* Reserved */ +#define SERDES_GEN_VERSION_RESERVED_MASK 0xC0000000 +#define SERDES_GEN_VERSION_RESERVED_SHIFT 30 + +/**** reg_addr register ****/ +/* address value */ +#define SERDES_GEN_REG_ADDR_VAL_MASK 0x0000FFFF +#define SERDES_GEN_REG_ADDR_VAL_SHIFT 0 + +/**** reg_data register ****/ +/* data value */ +#define SERDES_GEN_REG_DATA_VAL_MASK 0x000000FF +#define SERDES_GEN_REG_DATA_VAL_SHIFT 0 + +/**** ICTL_MULTI_BIST register ****/ + +#define SERDES_GEN_ICTL_MULTI_BIST_MODESEL_NT_MASK 0x00000007 +#define 
SERDES_GEN_ICTL_MULTI_BIST_MODESEL_NT_SHIFT 0 + +/**** ICTL_PCS register ****/ + +#define SERDES_GEN_ICTL_PCS_EN_NT (1 << 0) + +/**** ICTL_PMA register ****/ + +#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_MASK 0x00000007 +#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT 0 + +#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_REF \ + (0 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT)) +#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_R2L \ + (3 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT)) +#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_L2R \ + (4 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT)) + +#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_MASK 0x00000070 +#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT 4 + +#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_0 \ + (0 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT)) +#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_REF \ + (2 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT)) +#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_R2L \ + (3 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT)) + +#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_MASK 0x00000700 +#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT 8 + +#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_0 \ + (0 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT)) +#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_REF \ + (2 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT)) +#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_L2R \ + (3 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT)) + +#define SERDES_GEN_ICTL_PMA_TXENABLE_A (1 << 12) + +#define SERDES_GEN_ICTL_PMA_SYNTHCKBYPASSEN_NT (1 << 13) + +/**** IPD_MULTI_SYNTH register ****/ + +#define SERDES_GEN_IPD_MULTI_SYNTH_B (1 << 0) + +/**** IRST register ****/ + +#define SERDES_GEN_IRST_PIPE_RST_L3_B_A (1 << 0) + +#define SERDES_GEN_IRST_PIPE_RST_L2_B_A (1 << 1) + +#define SERDES_GEN_IRST_PIPE_RST_L1_B_A (1 << 2) + +#define SERDES_GEN_IRST_PIPE_RST_L0_B_A (1 << 3) + +#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L3_B_A (1 << 4) + +#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L2_B_A (1 << 5) + +#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L1_B_A (1 << 6) + +#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L0_B_A (1 << 7) + +#define SERDES_GEN_IRST_MULTI_HARD_SYNTH_B_A (1 << 8) + +#define SERDES_GEN_IRST_POR_B_A (1 << 12) + +#define SERDES_GEN_IRST_PIPE_RST_L3_B_A_SEL (1 << 16) + +#define SERDES_GEN_IRST_PIPE_RST_L2_B_A_SEL (1 << 17) + +#define SERDES_GEN_IRST_PIPE_RST_L1_B_A_SEL (1 << 18) + +#define SERDES_GEN_IRST_PIPE_RST_L0_B_A_SEL (1 << 19) + +#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L3_B_A_SEL (1 << 20) + +#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L2_B_A_SEL (1 << 21) + +#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L1_B_A_SEL (1 << 22) + +#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L0_B_A_SEL (1 << 23) + +/**** OCTL_MULTI_SYNTHREADY register ****/ + +#define SERDES_GEN_OCTL_MULTI_SYNTHREADY_A (1 << 0) + +/**** OCTL_MULTI_SYNTHSTATUS register ****/ + +#define SERDES_GEN_OCTL_MULTI_SYNTHSTATUS_A (1 << 0) + +/**** clk_out register ****/ + +#define SERDES_GEN_CLK_OUT_SEL_MASK 0x0000003F +#define SERDES_GEN_CLK_OUT_SEL_SHIFT 0 + +/**** OCTL_PMA register ****/ + +#define SERDES_LANE_OCTL_PMA_TXSTATUS_L_A (1 << 0) + +/**** ICTL_MULTI_ANDME register ****/ + +#define SERDES_LANE_ICTL_MULTI_ANDME_EN_L_A (1 << 0) + +#define SERDES_LANE_ICTL_MULTI_ANDME_EN_L_A_SEL (1 << 1) + +/**** ICTL_MULTI_LB register ****/ + +#define SERDES_LANE_ICTL_MULTI_LB_TX2RXIOTIMEDEN_L_NT (1 << 0) + +#define SERDES_LANE_ICTL_MULTI_LB_TX2RXBUFTIMEDEN_L_NT (1 << 1) + 
+#define SERDES_LANE_ICTL_MULTI_LB_RX2TXUNTIMEDEN_L_NT (1 << 2) + +#define SERDES_LANE_ICTL_MULTI_LB_PARRX2TXTIMEDEN_L_NT (1 << 3) + +#define SERDES_LANE_ICTL_MULTI_LB_CDRCLK2TXEN_L_NT (1 << 4) + +#define SERDES_LANE_ICTL_MULTI_LB_TX2RXBUFTIMEDEN_L_NT_SEL (1 << 8) + +#define SERDES_LANE_ICTL_MULTI_LB_RX2TXUNTIMEDEN_L_NT_SEL (1 << 9) + +/**** ICTL_MULTI_RXBIST register ****/ + +#define SERDES_LANE_ICTL_MULTI_RXBIST_EN_L_A (1 << 0) + +/**** ICTL_MULTI_TXBIST register ****/ + +#define SERDES_LANE_ICTL_MULTI_TXBIST_EN_L_A (1 << 0) + +/**** ICTL_MULTI register ****/ + +#define SERDES_LANE_ICTL_MULTI_PSTATE_L_MASK 0x00000003 +#define SERDES_LANE_ICTL_MULTI_PSTATE_L_SHIFT 0 + +#define SERDES_LANE_ICTL_MULTI_PSTATE_L_SEL (1 << 2) + +#define SERDES_LANE_ICTL_MULTI_RXDATAWIDTH_L_MASK 0x00000070 +#define SERDES_LANE_ICTL_MULTI_RXDATAWIDTH_L_SHIFT 4 + +#define SERDES_LANE_ICTL_MULTI_RXOVRCDRLOCK2DATAEN_L_A (1 << 8) + +#define SERDES_LANE_ICTL_MULTI_RXOVRCDRLOCK2DATA_L_A (1 << 9) + +#define SERDES_LANE_ICTL_MULTI_TXBEACON_L_A (1 << 12) + +#define SERDES_LANE_ICTL_MULTI_TXDETECTRXREQ_L_A (1 << 13) + +#define SERDES_LANE_ICTL_MULTI_RXRATE_L_MASK 0x00070000 +#define SERDES_LANE_ICTL_MULTI_RXRATE_L_SHIFT 16 + +#define SERDES_LANE_ICTL_MULTI_RXRATE_L_SEL (1 << 19) + +#define SERDES_LANE_ICTL_MULTI_TXRATE_L_MASK 0x00700000 +#define SERDES_LANE_ICTL_MULTI_TXRATE_L_SHIFT 20 + +#define SERDES_LANE_ICTL_MULTI_TXRATE_L_SEL (1 << 23) + +#define SERDES_LANE_ICTL_MULTI_TXAMP_L_MASK 0x07000000 +#define SERDES_LANE_ICTL_MULTI_TXAMP_L_SHIFT 24 + +#define SERDES_LANE_ICTL_MULTI_TXAMP_EN_L (1 << 27) + +#define SERDES_LANE_ICTL_MULTI_TXDATAWIDTH_L_MASK 0x70000000 +#define SERDES_LANE_ICTL_MULTI_TXDATAWIDTH_L_SHIFT 28 + +/**** ICTL_MULTI_RXEQ register ****/ + +#define SERDES_LANE_ICTL_MULTI_RXEQ_EN_L (1 << 0) + +#define SERDES_LANE_ICTL_MULTI_RXEQ_START_L_A (1 << 1) + +#define SERDES_LANE_ICTL_MULTI_RXEQ_PRECAL_CODE_SEL_MASK 0x00000070 +#define SERDES_LANE_ICTL_MULTI_RXEQ_PRECAL_CODE_SEL_SHIFT 4 + +/**** ICTL_MULTI_RXEQ_L_high register ****/ + +#define SERDES_LANE_ICTL_MULTI_RXEQ_L_HIGH_VAL (1 << 0) + +/**** ICTL_MULTI_RXEYEDIAG register ****/ + +#define SERDES_LANE_ICTL_MULTI_RXEYEDIAG_START_L_A (1 << 0) + +/**** ICTL_MULTI_TXDEEMPH register ****/ + +#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_L_MASK 0x0003FFFF +#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_L_SHIFT 0 + +#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_ZERO_MASK 0x7c0 +#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_ZERO_SHIFT 6 +#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_PLUS_MASK 0xf000 +#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_PLUS_SHIFT 12 +#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_MINUS_MASK 0x7 +#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_MINUS_SHIFT 0 + +/**** ICTL_MULTI_TXMARGIN register ****/ + +#define SERDES_LANE_ICTL_MULTI_TXMARGIN_L_MASK 0x00000007 +#define SERDES_LANE_ICTL_MULTI_TXMARGIN_L_SHIFT 0 + +/**** ICTL_MULTI_TXSWING register ****/ + +#define SERDES_LANE_ICTL_MULTI_TXSWING_L (1 << 0) + +/**** IDAT_MULTI register ****/ + +#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_MASK 0x0000000F +#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_SHIFT 0 + +#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_SEL (1 << 4) + +/**** IPD_MULTI register ****/ + +#define SERDES_LANE_IPD_MULTI_TX_L_B (1 << 0) + +#define SERDES_LANE_IPD_MULTI_RX_L_B (1 << 1) + +/**** OCTL_MULTI_RXBIST register ****/ + +#define SERDES_LANE_OCTL_MULTI_RXBIST_DONE_L_A (1 << 0) + +#define SERDES_LANE_OCTL_MULTI_RXBIST_RXLOCKED_L_A (1 << 1) + +/**** OCTL_MULTI register ****/ + +#define 
SERDES_LANE_OCTL_MULTI_RXCDRLOCK2DATA_L_A (1 << 0) + +#define SERDES_LANE_OCTL_MULTI_RXEQ_DONE_L_A (1 << 1) + +#define SERDES_LANE_OCTL_MULTI_RXREADY_L_A (1 << 2) + +#define SERDES_LANE_OCTL_MULTI_RXSTATUS_L_A (1 << 3) + +#define SERDES_LANE_OCTL_MULTI_TXREADY_L_A (1 << 4) + +#define SERDES_LANE_OCTL_MULTI_TXDETECTRXSTAT_L_A (1 << 5) + +#define SERDES_LANE_OCTL_MULTI_TXDETECTRXACK_L_A (1 << 6) + +#define SERDES_LANE_OCTL_MULTI_RXSIGNALDETECT_L_A (1 << 7) + +/**** OCTL_MULTI_RXEYEDIAG register ****/ + +#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_STAT_L_A_MASK 0x00003FFF +#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_STAT_L_A_SHIFT 0 + +#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_DONE_L_A (1 << 16) + +#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_ERR_L_A (1 << 17) + +/**** ODAT_MULTI_RXBIST register ****/ + +#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_L_A_MASK 0x0000FFFF +#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_L_A_SHIFT 0 + +#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_OVERFLOW_L_A (1 << 16) + +/**** ODAT_MULTI_RXEQ register ****/ + +#define SERDES_LANE_ODAT_MULTI_RXEQ_BEST_EYE_VAL_L_A_MASK 0x00003FFF +#define SERDES_LANE_ODAT_MULTI_RXEQ_BEST_EYE_VAL_L_A_SHIFT 0 + +/**** MULTI_RX_DVALID register ****/ + +#define SERDES_LANE_MULTI_RX_DVALID_MASK_CDR_LOCK (1 << 0) + +#define SERDES_LANE_MULTI_RX_DVALID_MASK_SIGNALDETECT (1 << 1) + +#define SERDES_LANE_MULTI_RX_DVALID_MASK_TX_READY (1 << 2) + +#define SERDES_LANE_MULTI_RX_DVALID_MASK_RX_READY (1 << 3) + +#define SERDES_LANE_MULTI_RX_DVALID_MASK_SYNT_READY (1 << 4) + +#define SERDES_LANE_MULTI_RX_DVALID_MASK_RX_ELECIDLE (1 << 5) + +#define SERDES_LANE_MULTI_RX_DVALID_MUX_SEL_MASK 0x00FF0000 +#define SERDES_LANE_MULTI_RX_DVALID_MUX_SEL_SHIFT 16 + +#define SERDES_LANE_MULTI_RX_DVALID_PS_00_SEL (1 << 24) + +#define SERDES_LANE_MULTI_RX_DVALID_PS_00_VAL (1 << 25) + +#define SERDES_LANE_MULTI_RX_DVALID_PS_01_SEL (1 << 26) + +#define SERDES_LANE_MULTI_RX_DVALID_PS_01_VAL (1 << 27) + +#define SERDES_LANE_MULTI_RX_DVALID_PS_10_SEL (1 << 28) + +#define SERDES_LANE_MULTI_RX_DVALID_PS_10_VAL (1 << 29) + +#define SERDES_LANE_MULTI_RX_DVALID_PS_11_SEL (1 << 30) + +#define SERDES_LANE_MULTI_RX_DVALID_PS_11_VAL (1 << 31) + +/**** reserved register ****/ + +#define SERDES_LANE_RESERVED_OUT_MASK 0x000000FF +#define SERDES_LANE_RESERVED_OUT_SHIFT 0 + +#define SERDES_LANE_RESERVED_IN_MASK 0x00FF0000 +#define SERDES_LANE_RESERVED_IN_SHIFT 16 + +#ifdef __cplusplus +} +#endif + +#endif /* __AL_serdes_REG_H */ + + + + diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_ssm.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_ssm.h new file mode 100644 index 00000000000000..deea91688526e3 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_ssm.h @@ -0,0 +1,172 @@ +/******************************************************************************* +Copyright (C) 2014 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_ssm_api API
+ * Cryptographic / RAID Acceleration Engine common HAL API
+ * @ingroup group_ssm
+ * @{
+ * @file al_hal_ssm.h
+ */
+
+#ifndef __AL_HAL_SSM_H__
+#define __AL_HAL_SSM_H__
+
+#include
+#include
+#include
+
+#define AL_SSM_MAX_SRC_DESCS 31
+#define AL_SSM_MAX_DST_DESCS 31
+
+enum al_ssm_op_flags {
+ AL_SSM_INTERRUPT = AL_BIT(0), /* enable interrupt when the xaction
+ completes */
+ AL_SSM_BARRIER = AL_BIT(1), /* data memory barrier, subsequent xactions
+ will be served only when the current one
+ completes */
+ AL_SSM_SRC_NO_SNOOP = AL_BIT(2), /* set no snoop on source buffers */
+ AL_SSM_DEST_NO_SNOOP = AL_BIT(3), /* set no snoop on destination buffers */
+};
+
+/** SSM queue types.
+ * Must be statically allocated; the queue type cannot be changed at run
+ * time */
+enum al_ssm_q_type {
+ AL_CRYPT_AUTH_Q,
+ AL_MEM_CRC_MEMCPY_Q,
+ AL_RAID_Q
+};
+
+/** SSM (security, storage, memory) DMA private data structure
+ * The driver maintains an M2M UDMA structure, as the HW consists of two
+ * UDMAs. Both UDMAs are initialized and managed using the m2m_udma module.
+ * The driver uses RX completion descriptors as the sole indication for
+ * completing transactions, and disregards any TX completion descriptors.
+ * Every queue can be marked as a crypt/auth queue (used for crypt/auth
+ * transactions), a crc/csum/memcpy queue (used for crc/csum/memcpy
+ * transactions) or a raid queue (used for raid transactions)
+ */
+struct al_ssm_dma {
+ uint16_t dev_id; /**
+#include
+
+/*
+ * Rx (S2M) Descriptors
+ */
+#define RX_DESC_META (1<<30) /* Meta data */
+/* Tx (M2S) word1 common Descriptors */
+#define TX_DESC_META_OP_MASK (0x3<<23)
+#define TX_DESC_META_OP_SHIFT (23)
+
+/* Word 1 */
+#define TX_DESC_META_CRC_OP_TYPE_MASK (0x7<<20)/* CRC/Checksum op type mask */
+#define TX_DESC_META_CRC_OP_TYPE_SHIFT (20)
+#define TX_DESC_META_CRC_SEND_ORIG (1<<19) /* send the original data */
+#define TX_DESC_META_CRC_ST_CRC_IV (1<<18) /* store buf crc IV to cache*/
+#define TX_DESC_META_CRC_SAVE_IV (1<<17) /* store crc res in cache */
+#define TX_DESC_META_CRC_SEND_CRC (1<<16) /* send crc result */
+#define TX_DESC_META_CRC_USE_ST_IV (1<<15) /* use IV from cache */
+#define TX_DESC_META_CRC_VALID (1<<14) /* Validate crc */
+
+#define TX_DESC_META_CRC_SWAP_MASK (0xff<<6) /* Swapping */
+#define TX_DESC_META_CRC_SWAP_SHIFT (6)
+
+#define TX_DESC_META_CRC_IDX_MASK (0x7<<3) /* Cached CRC IV index mask */
+#define TX_DESC_META_CRC_IDX_SHIFT (3)
+
+#define RX_DESC_META_CRC_FIRST_BUF (1<<1) /* First buffer in the block */
+#define RX_DESC_META_CRC_LAST_BUF (1<<0) /* Last buffer in the block */
+
+/* Tx (M2S) word2 Descriptors -> in XOR */
+
+/* Tx (M2S) word3 Descriptors -> Out XOR */
+
+#define AL_CRC_CHECKSUM 2
+
+#define RX_COMP_STATUS_MASK 0
+
+/** CRC/checksum operation type; values according
+ * to HW descriptor setting
+ */
+enum al_crc_checksum_type {
+ AL_CRC_CHECKSUM_NULL = 0,
+ AL_CRC_CHECKSUM_CRC32 = 1,
+ AL_CRC_CHECKSUM_CRC32C = 2,
+ AL_CRC_CHECKSUM_CKSM16 = 3
+};
+
+/** CRC/Checksum Operation bit/byte swap */
+enum al_crcsum_swap_flags {
+ IV_BIT_SWAP = AL_BIT(7),
+ IV_BYTE_SWAP = AL_BIT(6),
+ SRC_BIT_SWAP = AL_BIT(5),
+ SRC_BYTE_SWAP = AL_BIT(4),
+ RES_BIT_SWAP = AL_BIT(1),
+ RES_BYTE_SWAP = AL_BIT(0)
+};
+
+/** Memcpy transaction
+ */
+struct al_memcpy_transaction {
+ enum al_ssm_op_flags flags;
+ struct al_block src; /**< In data - scatter gather*/
+ struct al_block dst; /**< Out data - scatter gather*/
+ uint32_t tx_descs_count;/* number of tx descriptors created for this */
+ /* transaction, this field is set by the hal */
+};
+
+/** CRC/Checksum transaction
+ * In case dst, iv_in, crc_out are not valid, set the al_buf->len to 0
+ * When the dst is not empty the src will be copied to the dst and the
+ * CRC/checksum will be calculated on the fly.
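+ *
+ * Minimal CRC32 setup (an illustrative sketch; src_block and crc_buf are
+ * assumed prepared by the caller, unused al_buf fields keep len == 0 and
+ * the unused dst block keeps num == 0):
+ *
+ *	struct al_crc_transaction xaction = { 0 };
+ *
+ *	xaction.crcsum_type = AL_CRC_CHECKSUM_CRC32;
+ *	xaction.flags = AL_SSM_INTERRUPT;
+ *	xaction.src = src_block;	-- data to checksum (scatter-gather)
+ *	xaction.crc_out = crc_buf;	-- 4-byte result buffer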
+ */
+struct al_crc_transaction {
+ enum al_crc_checksum_type crcsum_type; /**< CRC type / checksum */
+ enum al_ssm_op_flags flags;
+ struct al_block src; /**< In data - scatter gather*/
+ struct al_block dst; /**< Out data - scatter gather*/
+ uint32_t tx_descs_count;/* number of tx descriptors created for this */
+ /* transaction, this field is set by the hal */
+
+ /**
+ * Virtual machine ID for misc buffers below - input and output data
+ * blocks contain the vmid inside the block structure
+ */
+ uint16_t misc_vmid;
+
+ struct al_buf crc_iv_in; /**< CRC IV, if not set will use from cache */
+ uint32_t cached_crc_indx;/**< cached CRC index in crypto engine */
+ al_bool save_crc_iv; /**< Save IV in the cache */
+ al_bool st_crc_out; /**< Store CRC out in the cache */
+ struct al_buf crc_expected; /**< Expected CRC to validate */
+ struct al_buf crc_out; /**< Calculated CRC/Checksum buffer */
+
+ /* Enhanced */
+ enum al_crcsum_swap_flags swap_flags; /**< Swap fields */
+ al_bool xor_valid; /**< valid in and out XOR */
+ uint32_t in_xor; /**< CRC Input XOR */
+ uint32_t res_xor; /**< CRC Result XOR */
+};
+
+/**
+ * Send Memcpy transaction to the HW
+ *
+ * Perform the following steps:
+ * - Calculate the number of needed RX descriptors and check if the RX UDMA
+ * has available space.
+ * - Do the same for TX descriptors.
+ * - Prepare the RX descriptors.
+ * - Update the tail pointer of the submission ring of the RX UDMA about the
+ * new prepared descriptors.
+ * - Prepare the TX descriptors.
+ *
+ * Note: A given UDMA queue can be used either for crypto/authentication
+ * transactions or for crc/csum/memcpy transactions, but not for both types.
+ *
+ * @param dma crypto DMA handle
+ * @param qid queue index
+ * @param xaction transaction context
+ *
+ * @return 0 if no error found.
+ * <0 otherwise.
+ */
+int al_memcpy_prepare(struct al_ssm_dma *dma, uint32_t qid,
+ struct al_memcpy_transaction *xaction);
+
+/**
+ * Send CRC/Checksum transaction to the HW
+ *
+ * Perform the following steps:
+ * - Calculate the number of needed RX descriptors and check if the RX UDMA
+ * has available space.
+ * The number of descriptors depends on which buffers are passed in
+ * the transaction (crc_out) and the number of dest buffers.
+ * - Do the same for TX descriptors. The number of descriptors depends on
+ * which buffers are passed in the transaction (crc_iv_in, crc_expected) and
+ * the number of source buffers.
+ * - Prepare the RX descriptors.
+ * - Update the tail pointer of the submission ring of the RX UDMA
+ * about the new prepared descriptors.
+ * - Prepare the TX descriptors.
+ *
+ * Note: A given UDMA queue can be used either for crypto/authentication
+ * transactions or for crc/csum/memcpy transactions, but not for both types.
+ *
+ * @param dma crypto DMA handle
+ * @param qid queue index
+ * @param xaction transaction context
+ *
+ * @return 0 if no error found.
+ * -ENOSPC if no space available.
+ */
+int al_crc_csum_prepare(struct al_ssm_dma *dma, uint32_t qid,
+ struct al_crc_transaction *xaction);
+
+/**
+ * Start asynchronous execution of crypto/auth or CRC/Checksum transaction
+ *
+ * Update the tail pointer of the submission ring of the TX UDMA about
+ * previously prepared descriptors.
+ * This function may return before the hardware starts the work, as it is an
+ * asynchronous non-blocking call to the hardware.
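+ *
+ * Typical flow (sketch; error handling elided, caller-side variable names
+ * are assumptions):
+ *
+ *	rc = al_crc_csum_prepare(dma, qid, &xaction);
+ *	if (!rc)
+ *		rc = al_crc_memcpy_dma_action(dma, qid, xaction.tx_descs_count);
+ *	-- later, e.g. from the completion interrupt handler:
+ *	done = al_crc_memcpy_dma_completion(dma, qid, &comp_status);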
+ *
+ * @param dma crypto DMA handle
+ * @param qid queue index
+ * @param tx_descs number of tx descriptors to be processed by the engine
+ *
+ * @return 0 if no error found.
+ * -EINVAL if qid is out of range
+ */
+int al_crc_memcpy_dma_action(struct al_ssm_dma *dma, uint32_t qid,
+ int tx_descs);
+
+/**
+ * Check and cleanup completed transaction
+ *
+ * When the upper layer decides to check for completed transactions
+ * (e.g. due to an interrupt) it calls the al_crc_memcpy_dma_completion()
+ * API function provided by this driver. This function calls a helper
+ * function provided by the m2m_udma module to check for completed requests.
+ * al_crc_memcpy_dma_completion() is responsible for the cleanup of the
+ * completed requests from the completion ring, so the upper layer doesn't
+ * need to worry about queue management.
+ * This driver doesn't report to the upper layer which transaction was
+ * completed; the upper layer should find this information by itself, relying
+ * on the fact that for a given queue, transactions complete in the same
+ * order they were sent to that queue. No ordering is guaranteed between
+ * transactions sent to different queues.
+
+ * @param dma crypto DMA handle
+ * @param qid queue index
+ * @param comp_status status reported by rx completion descriptor
+ *
+ * @return the number of completed transactions.
+ */
+int al_crc_memcpy_dma_completion(struct al_ssm_dma *dma,
+ uint32_t qid,
+ uint32_t *comp_status);
+
+ /* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of crc_memcpy group */
+#endif /* __AL_HAL_SSM_CRC_MEMCPY_H__ */
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_ssm_crypto.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_ssm_crypto.h
new file mode 100644
index 00000000000000..31a67343a4d0dc
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_ssm_crypto.h
@@ -0,0 +1,432 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_crypto_api API
+ * Cryptographic Acceleration Engine HAL driver API
+ * @ingroup group_crypto
+ * @{
+ * @file al_hal_ssm_crypto.h
+ */
+
+#ifndef __AL_HAL_CRYPT_H__
+#define __AL_HAL_CRYPT_H__
+
+#include
+#include
+#include
+#include
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+#define CRYPTO_DEBUG
+
+#ifdef CRYPTO_DEBUG
+#define al_debug al_dbg
+#else
+#define al_debug(...)
+#endif
+
+/* PCI Adapter Device/Revision ID */
+#define AL_CRYPTO_DEV_ID 0x0011
+#define AL_CRYPTO_REV_ID_0 0
+#define AL_CRYPTO_REV_ID_1 1
+
+#define CACHED_SAD_SIZE 16
+#define CRC_IV_CACHE_SIZE 8
+
+/** How many descriptors to save between head and tail in case of
+ * wrap around.
+ */
+#define AL_CRYPT_DESC_RES 0
+
+/* Application IOFIC definitions */
+#define AL_CRYPTO_APP_REGS_BASE_OFFSET 0x800
+#define AL_CRYPTO_APP_IOFIC_OFFSET 0x0
+
+/* interrupt controller group A */
+#define AL_CRYPTO_APP_INT_A_S2M_TIMOUT AL_BIT(0)
+#define AL_CRYPTO_APP_INT_A_M2S_TIMOUT AL_BIT(1)
+#define AL_CRYPTO_APP_INT_A_EOP_WITHOUT_SOP AL_BIT(2)
+#define AL_CRYPTO_APP_INT_A_SOP_WITHOUT_EOP AL_BIT(3)
+#define AL_CRYPTO_APP_INT_A_SOP_WITH_EOP_TOGETHER AL_BIT(4)
+#define AL_CRYPTO_APP_INT_A_UNMAP_PROTOCOL AL_BIT(5)
+#define AL_CRYPTO_APP_INT_A_FIFO_OVERRUN AL_BIT(6)
+#define AL_CRYPTO_APP_INT_A_ALL \
+ (AL_CRYPTO_APP_INT_A_S2M_TIMOUT | \
+ AL_CRYPTO_APP_INT_A_M2S_TIMOUT | \
+ AL_CRYPTO_APP_INT_A_EOP_WITHOUT_SOP | \
+ AL_CRYPTO_APP_INT_A_SOP_WITHOUT_EOP | \
+ AL_CRYPTO_APP_INT_A_SOP_WITH_EOP_TOGETHER | \
+ AL_CRYPTO_APP_INT_A_UNMAP_PROTOCOL | \
+ AL_CRYPTO_APP_INT_A_FIFO_OVERRUN)
+
+/** Crypto modes, auth, enc or enc+Auth */
+enum al_crypto_sa_op {
+ AL_CRYPT_RES = 0,
+ AL_CRYPT_ENC_ONLY = 1,
+ AL_CRYPT_AUTH_ONLY = 2,
+ AL_CRYPT_ENC_AUTH = 3
+};
+
+/** Encryption types */
+enum al_crypto_sa_enc_type {
+ AL_CRYPT_DES_ECB = 0,
+ AL_CRYPT_DES_CBC = 1,
+ AL_CRYPT_TRIPDES_ECB = 2,
+ AL_CRYPT_TRIPDES_CBC = 3,
+ AL_CRYPT_AES_ECB = 4,
+ AL_CRYPT_AES_CBC = 5,
+ AL_CRYPT_AES_CTR = 6,
+ AL_CRYPT_AES_CCM = 7,
+ AL_CRYPT_AES_GCM = 8,
+ AL_CRYPT_MAX = 9
+};
+
+/** 3des modes */
+enum al_crypto_sa_tripdes_m {
+ AL_CRYPT_TRIPDES_EDE = 1
+};
+
+/** AES key sizes */
+enum al_crypto_sa_aes_ksize {
+ AL_CRYPT_AES_128 = 0,
+ AL_CRYPT_AES_192 = 1,
+ AL_CRYPT_AES_256 = 2
+};
+
+/** Authentication types */
+enum al_crypto_sa_auth_type {
+ AL_CRYPT_AUTH_MD5 = 0,
+ AL_CRYPT_AUTH_SHA1 = 1,
+ AL_CRYPT_AUTH_SHA2 = 2,
+ AL_CRYPT_AUTH_AES_CCM = 5,
+ AL_CRYPT_AUTH_AES_GCM = 6
+};
+
+/** SHA2 digest sizes */
+enum al_crypto_sa_sha2_mode {
+ AL_CRYPT_SHA2_256 = 0,
+ AL_CRYPT_SHA2_384 = 1,
+ AL_CRYPT_SHA2_512 = 2
+};
+
+/** CNTR size */
+enum al_crypto_cntr_size {
+ AL_CRYPT_CNTR_16_BIT = 0,
+ AL_CRYPT_CNTR_32_BIT = 1,
+ AL_CRYPT_CNTR_64_BIT = 2,
+ AL_CRYPT_CNTR_128_BIT = 3
+};
+
+/** Crypto SA (Security Association) parameters match the HW crypto SAD
+ * The cached SAD is not managed by the HAL; the HAL only
supplies the ability
+ * to push a new SA to the cached SAD and evict a cached SA through the
+ * al_crypto_dma_action API.
+ * Evicting an SA may be required in the following cases:
+ * - Each time an SA is evicted while using an IV generated by the Crypto
+ * engine
+ * - Each time an SA is evicted while using the SA to hold a temp MAC
+ * signature
+ * - The first time an SA is evicted when using an AES decryption key
+ * generated by the HW
+ * Fetching an SA can be done by pushing a new SA entry through
+ * al_crypto_transaction SA_in and placing an appropriate buffer in the
+ * SA_out.
+ * Initializing a new SA entry should be done through al_crypto_hw_sa_init.
+ *
+ */
+struct al_crypto_sa {
+ enum al_crypto_sa_op sa_op; /**< crypto operation */
+
+ /* Enc */
+ enum al_crypto_sa_enc_type enc_type;
+ enum al_crypto_sa_tripdes_m tripdes_m; /**< 3des mode EDE */
+ enum al_crypto_sa_aes_ksize aes_ksize;
+ enum al_crypto_cntr_size cntr_size; /**< relevant only for Alg using
+ CNTR mode*/
+
+ uint32_t enc_offset; /**<
+ enc start offset from start of buffer,
+ used only if not set through the crypto operation */
+ uint32_t enc_offset_eop; /**<
+ enc offset from end of buffer,
+ used only if not set through the crypto operation */
+
+ uint8_t enc_key[32];
+ uint8_t enc_iv[16];
+
+ /* Auth */
+ enum al_crypto_sa_auth_type auth_type;
+ enum al_crypto_sa_sha2_mode sha2_mode;
+ al_bool auth_hmac_en;
+ uint32_t signature_size; /**< sign size out in 4 * (size + 1) bytes */
+ al_bool auth_signature_msb; /**< when the signature output size is smaller
+ than the authentication algorithm output size, take the more
+ significant bits from the full size signature */
+ uint32_t auth_offset; /**<
+ auth start offset from start of buffer,
+ used only if not set through the crypto operation */
+ uint32_t auth_offset_eop;/**<
+ auth offset from end of buffer,
+ used only if not set through the crypto operation */
+ uint8_t auth_iv_in[64];
+ uint8_t hmac_iv_in[64]; /**< H(K xor ipad) */
+ uint8_t hmac_iv_out[64];/**< H(K xor opad) */
+ uint8_t enc_ccm_cbc_iv_add[4]; /**<
+ Used in CCM to generate Auth IV from encryption IV */
+ uint8_t aes_gcm_auth_iv[16]; /**< GCM auth IV */
+
+ /* Combined */
+ al_bool sign_after_enc; /**< common case is true */
+ al_bool auth_after_dec; /**< common case is false */
+
+};
+
+/** A single Crypto SA HW as cached in the SAD, each SA is described as an
+ * array of 32-bit words */
+struct al_crypto_hw_sa {
+ uint32_t sa_word[64];
+};
+
+/** Crypto operation direction, values according to HW descriptor setting */
+enum al_crypto_dir {
+ AL_CRYPT_ENCRYPT = 0,
+ AL_CRYPT_DECRYPT = 1,
+};
+
+/* transaction completion status */
+#define AL_CRYPT_AUTH_ERROR AL_BIT(0)
+#define AL_CRYPT_SA_IV_EVICT_FIFO_ERROR AL_BIT(8)
+#define AL_CRYPT_DES_ILLEGAL_KEY_ERROR AL_BIT(9)
+#define AL_CRYPT_M2S_ERROR AL_BIT(10)
+#define AL_CRYPT_SRAM_PARITY_ERROR AL_BIT(11)
+#define AL_CRYPT_INTERNAL_FLOW_VIOLATION_ERROR AL_BIT(15)
+
+/** Crypto transaction for enc, auth or enc+auth.
+ * In case sa_update, iv_*, auth_* are not valid, set the al_buf->len to 0
+ * In case dst is not valid set the al_block->num to 0.
+ *
+ * All Crypto transactions are associated with a cached SA;
+ * the SA is referenced by its index into the cached SAD, sa_indx.
+ *
+ * The al_crypto_dma_action API supports source scatter-list buffer
+ * encryption, authentication, and encryption plus authentication in one pass.
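+ *
+ * For reference, a one-pass AES-128-CBC + HMAC-SHA-256 SA could be set up
+ * roughly as follows before being pushed through sa_in (a sketch only;
+ * key/IV material and the remaining fields are omitted, and AL_TRUE stands
+ * for the HAL's boolean true value):
+ *
+ *	struct al_crypto_sa sa = { 0 };
+ *	struct al_crypto_hw_sa hw_sa = { { 0 } };	-- must start zero-filled
+ *
+ *	sa.sa_op = AL_CRYPT_ENC_AUTH;
+ *	sa.enc_type = AL_CRYPT_AES_CBC;
+ *	sa.aes_ksize = AL_CRYPT_AES_128;
+ *	sa.auth_type = AL_CRYPT_AUTH_SHA2;
+ *	sa.sha2_mode = AL_CRYPT_SHA2_256;
+ *	sa.auth_hmac_en = AL_TRUE;
+ *	sa.sign_after_enc = AL_TRUE;
+ *	al_crypto_hw_sa_init(&sa, &hw_sa);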
+ *
+ * When using an SA type of Authentication only, the Crypto can support
+ * splitting the Authentication operation into a few requests by using the
+ * auth_first, last and valid flags and using an Authentication IV auth_iv_in
+ * and auth_iv_out.
+ *
+ * When using an SA type of Encryption (Enc only or enc+Auth), the Crypto
+ * can get the IV required for the encryption from the upper layer enc_iv_in,
+ * or use an IV generated by the engine (based on the previous encryption
+ * executed using this SA). In any case the IV used by the engine can be
+ * passed to the upper layer through the enc_iv_out.
+ *
+ * When executing a signature verification operation on an SA type of
+ * Encryption and Authentication or of Authentication Only with last
+ * indication, the crypto can compare the actual buffer signature
+ * auth_sign_in to the engine outcome and indicate the result on the S2M
+ * completion. In any case the engine signature can be passed to the upper
+ * layer through the auth_sign_out.
+ *
+ * When using Authentication only and dst isn't empty, the src will be copied
+ * to the dst.
+ */
+struct al_crypto_transaction {
+ enum al_crypto_dir dir;
+ enum al_ssm_op_flags flags;
+ struct al_block src; /**< In data - scatter gather*/
+ uint32_t src_size; /**< Size of source buffer */
+ struct al_block dst; /**< Out data - scatter gather */
+ uint32_t tx_descs_count;/* number of tx descriptors created for this */
+ /* transaction, this field is set by the hal */
+
+ /**
+ * Virtual machine ID for misc buffers below - input and output data
+ * blocks contain the vmid inside the block structure
+ */
+ uint16_t misc_vmid;
+
+ /* SA */
+ uint32_t sa_indx; /**< SA index in the cached SAD to use */
+ struct al_buf sa_in; /**< pointer to SA al_crypto_hw_sa to
+ update in the cached SAD */
+ struct al_buf sa_out; /**< pointer to SA where to place
+ old cached SA */
+
+ /* Enc */
+ struct al_buf enc_iv_in; /**< IV from user, if not set will
+ use IV from the SA */
+ struct al_buf enc_iv_out; /**< Optional - Buffer to place
+ the used IV */
+ struct al_buf enc_next_iv_out; /**< Optional - Buffer to place
+ next used IV */
+ uint32_t enc_in_off; /**< offset where to start enc */
+ uint32_t enc_in_len; /**<
+ length of enc, if len set to 0 will use SA defaults */
+
+ /* Auth */
+ al_bool auth_fl_valid; /**<
+ valid indication for the auth first and last
+ indications */
+ al_bool auth_first; /**< Relevant for auth only SA */
+ al_bool auth_last; /**< Relevant for auth only SA */
+ struct al_buf auth_iv_in; /**< In case of auth only SA and
+ auth_first isn't set this is the
+ intermediate auth input. */
+ struct al_buf auth_iv_out; /**< In case of auth only SA and
+ auth_last isn't set, this is the
+ intermediate auth output. */
+ struct al_buf auth_sign_in; /**< The Signature to validate in front
+ of auth output.*/
+ struct al_buf auth_sign_out; /**< In case of combined enc and
+ auth SA or Auth only SA with last
+ this is the signature output
+ of the auth.
+ Size should be as indicated in the
+ SA signature_size
+ (sign_size+1)*4 */
+ uint32_t auth_in_off; /**< offset where to start auth */
+ uint32_t auth_in_len; /**<
+ length of auth, if set to 0 will use SA defaults */
+ uint32_t auth_bcnt; /**<
+ This field should be zero, unless this
+ packet is for an AUTH only SA with
+ last and not first.
+ In this case it will indicate the byte
+ count of the auth data till this point.
+		When using this field, the auth off and len
+		must contain valid data */
+
+
+};
+
+/**
+ * Initialize a single hw_sa
+ *
+ * @param sa crypto SA containing the desired SA parameters
+ * @param hw_sa crypto HW SA filled with zeros,
+ *	to be initialized according to the sa
+ *
+ * @return 0 if no error found,
+ *	-EINVAL otherwise.
+ */
+int al_crypto_hw_sa_init(struct al_crypto_sa *sa,
+		struct al_crypto_hw_sa *hw_sa);
+
+/**
+ * Prepare a crypto/auth transaction for the HW
+ *
+ * Perform the following steps:
+ * - Calculate the needed RX descriptors and check if the RX UDMA has
+ *   available space. The number of descriptors depends on which buffers are
+ *   passed in the transaction (SA_out, enc_IV_out, Sign_out) and the number
+ *   of destination buffers.
+ * - Do the same for TX descriptors. The number of descriptors depends on
+ *   which buffers are passed in the transaction (SA_in, enc_IV_in, Sign_in)
+ *   and the number of source buffers.
+ * - Prepare the RX descriptors.
+ * - Update the tail pointer of the submission ring of the RX UDMA
+ *   about the newly prepared descriptors.
+ * - Prepare the TX descriptors.
+ *
+ * Note: a UDMA queue can be used either for crypto/authentication
+ * transactions or for crc/csum/memcpy transactions, but not for both types.
+ *
+ * @param dma crypto DMA handle
+ * @param qid queue index
+ * @param xaction transaction context, the number of prepared TX descriptors
+ *	is returned in xaction->tx_descs_count
+ *
+ * @return 0 if no error found.
+ *	-ENOSPC if no space is available.
+ */
+int al_crypto_dma_prepare(struct al_ssm_dma *dma, uint32_t qid,
+		struct al_crypto_transaction *xaction);
+
+/**
+ * Start asynchronous execution of a crypto/auth or CRC/Checksum transaction
+ *
+ * Update the tail pointer of the submission ring of the TX UDMA about
+ * previously prepared descriptors.
+ * This function may return before the hardware starts the work, as it is an
+ * asynchronous non-blocking call to the hardware.
+ *
+ * @param dma crypto DMA handle
+ * @param qid queue index
+ * @param tx_descs number of tx descriptors to be processed by the engine
+ *
+ * @return 0 if no error found.
+ *	-EINVAL if qid is out of range
+ */
+int al_crypto_dma_action(struct al_ssm_dma *dma, uint32_t qid,
+		int tx_descs);
+
+/**
+ * Check for and clean up completed transactions
+ *
+ * When the upper layer decides to check for completed transactions
+ * (e.g. due to an interrupt), it calls the al_crypto_dma_completion()
+ * API function provided by this driver. This function calls a helper
+ * function provided by the m2m_udma module to check for completed requests.
+ * al_crypto_dma_completion() is responsible for the cleanup of the
+ * completed request from the completion ring, so the upper layer doesn't
+ * need to worry about queue management.
+ * This driver doesn't report to the upper layer which transaction was
+ * completed; the upper layer should find this information by itself, relying
+ * on the fact that for a given queue, transactions complete in the same
+ * order they were sent to that queue. No ordering is guaranteed between
+ * transactions sent to different queues.
+ *
+ * @param dma crypto DMA handle
+ * @param qid queue index
+ * @param comp_status status reported by the rx completion descriptor
+ *
+ * @return the number of completed transactions.
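+ *
+ * Illustrative polling sketch (assumed names, not part of the original
+ * sources):
+ *
+ *	uint32_t comp_status;
+ *	int done = al_crypto_dma_completion(dma, qid, &comp_status);
+ *
+ *	if (done && (comp_status & AL_CRYPT_AUTH_ERROR))
+ *		handle_auth_error();	(hypothetical error handler)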
+ */ +int al_crypto_dma_completion(struct al_ssm_dma *dma, + uint32_t qid, + uint32_t *comp_status); +/* *INDENT-OFF* */ +#ifdef __cplusplus +} +#endif +/* *INDENT-ON* */ +/** @} end of Crypto group */ +#endif /* __AL_HAL_CRYPT_H__ */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_types.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_types.h new file mode 100644 index 00000000000000..e0ee18091cd506 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_types.h @@ -0,0 +1,112 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @defgroup group_common HAL Common Layer + * @{ + * @file al_hal_types.h + * + * @brief macros used by HALs and platform layer + * + */ + +#ifndef __AL_HAL_TYPES_H__ +#define __AL_HAL_TYPES_H__ + +#include +#include + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +/* Common defines */ + +typedef int AL_RETURN; + +#if !defined(NULL) +#define NULL (void *)0 +#endif + +#if !defined(likely) +#define likely(x) (x) +#define unlikely(x) (x) +#endif + + +#ifdef __GNUC__ +#if !defined(__packed) +#define __packed __attribute__ ((packed)) +#endif + /* packed and alinged types */ +#define __packed_a4 __attribute__ ((packed, aligned(4))) +#define __packed_a8 __attribute__ ((packed, aligned(8))) +#define __packed_a16 __attribute__ ((packed, aligned(16))) + +#else +#if !defined(__packed) +#error "__packed is not defined!!" 
+#endif +#endif + +#if !defined(__iomem) +#define __iomem +#endif + +#if !defined(__cache_aligned) +#ifdef __GNUC__ +#define __cache_aligned __attribute__ ((__aligned__(64))) +#else +#define __cache_aligned +#endif +#endif + +#if !defined(INLINE) +#ifdef __GNUC__ +#define INLINE inline +#else +#define INLINE +#endif +#endif + +/* *INDENT-OFF* */ +#ifdef __cplusplus +} +#endif +/* *INDENT-ON* */ +/** @} end of Common group */ +#endif /* __TYPES_H__ */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma.h new file mode 100644 index 00000000000000..068eda9b48df21 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma.h @@ -0,0 +1,662 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @defgroup group_udma_api API + * @ingroup group_udma + * UDMA API + * @{ + * @} + * + * @defgroup group_udma_main UDMA Main + * @ingroup group_udma_api + * UDMA main API + * @{ + * @file al_hal_udma.h + * + * @brief C Header file for the Universal DMA HAL driver + * + */ + +#ifndef __AL_HAL_UDMA_H__ +#define __AL_HAL_UDMA_H__ + +#include +#include + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +#define DMA_MAX_Q 4 +#define AL_UDMA_MIN_Q_SIZE 4 +#define AL_UDMA_MAX_Q_SIZE (1 << 16) /* hw can do more, but we limit it */ + +/* Default Max number of descriptors supported per action */ +#define AL_UDMA_DEFAULT_MAX_ACTN_DESCS 16 + +#define DMA_RING_ID_MASK 0x3 +/* New registers ?? 
*/ +/* Statistics - TBD */ + +/** UDMA submission descriptor */ +union al_udma_desc { + /* TX */ + struct { + uint32_t len_ctrl; + uint32_t meta_ctrl; + uint64_t buf_ptr; + } tx; + /* TX Meta, used by upper layer */ + struct { + uint32_t len_ctrl; + uint32_t meta_ctrl; + uint32_t meta1; + uint32_t meta2; + } tx_meta; + /* RX */ + struct { + uint32_t len_ctrl; + uint32_t buf2_ptr_lo; + uint64_t buf1_ptr; + } rx; +} __packed_a16; + +/* TX desc length and control fields */ + +#define AL_M2S_DESC_CONCAT AL_BIT(31) /* concatenate */ +#define AL_M2S_DESC_DMB AL_BIT(30) + /** Data Memory Barrier */ +#define AL_M2S_DESC_NO_SNOOP_H AL_BIT(29) +#define AL_M2S_DESC_INT_EN AL_BIT(28) /** enable interrupt */ +#define AL_M2S_DESC_LAST AL_BIT(27) +#define AL_M2S_DESC_FIRST AL_BIT(26) +#define AL_M2S_DESC_RING_ID_SHIFT 24 +#define AL_M2S_DESC_RING_ID_MASK (0x3 << AL_M2S_DESC_RING_ID_SHIFT) +#define AL_M2S_DESC_META_DATA AL_BIT(23) +#define AL_M2S_DESC_DUMMY AL_BIT(22) /* for Metdata only */ +#define AL_M2S_DESC_LEN_ADJ_SHIFT 20 +#define AL_M2S_DESC_LEN_ADJ_MASK (0x7 << AL_M2S_DESC_LEN_ADJ_SHIFT) +#define AL_M2S_DESC_LEN_SHIFT 0 +#define AL_M2S_DESC_LEN_MASK (0xfffff << AL_M2S_DESC_LEN_SHIFT) + +#define AL_S2M_DESC_NO_SNOOP_H AL_BIT(29) +#define AL_S2M_DESC_INT_EN AL_BIT(28) /** enable interrupt */ +#define AL_S2M_DESC_RING_ID_SHIFT 24 +#define AL_S2M_DESC_RING_ID_MASK (0x3 << AL_S2M_DESC_RING_ID_SHIFT) +#define AL_S2M_DESC_LEN_SHIFT 0 +#define AL_S2M_DESC_LEN_MASK (0xffff << AL_S2M_DESC_LEN_SHIFT) + +/* TX/RX descriptor VMID field (in the buffer address 64 bit field) */ +#define AL_UDMA_DESC_VMID_SHIFT 48 + +/** UDMA completion descriptor */ +union al_udma_cdesc { + /* TX completion */ + struct { + uint32_t ctrl_meta; + } al_desc_comp_tx; + /* RX completion */ + struct { + /* TBD */ + uint32_t ctrl_meta; + } al_desc_comp_rx; +} __packed_a4; + +/* TX/RX common completion desc ctrl_meta feilds */ +#define AL_UDMA_CDESC_ERROR AL_BIT(31) +#define AL_UDMA_CDESC_LAST AL_BIT(27) +#define AL_UDMA_CDESC_FIRST AL_BIT(26) +/* word 2 */ +#define AL_UDMA_CDESC_BUF2_USED AL_BIT(31) +/** Basic Buffer structure */ +struct al_buf { + al_phys_addr_t addr; /**< Buffer physical address */ + uint32_t len; /**< Buffer lenght in bytes */ +}; + +/** Block is a set of buffers that belong to same source or destination */ +struct al_block { + struct al_buf *bufs; /**< The buffers of the block */ + uint32_t num; /**< Number of buffers of the block */ + + /**< + * VMID to be assigned to the block descriptors + * Requires VMID in descriptor to be enabled for the specific UDMA + * queue. 
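+	 * The VMID ends up in bits 63:48 of the descriptor buffer address
+	 * (see AL_UDMA_DESC_VMID_SHIFT above), so e.g. vmid = 5 tags all
+	 * accesses to this block's buffers with virtual machine ID 5.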
+ */ + uint16_t vmid; +}; + +/** UDMA type */ +enum al_udma_type { + UDMA_TX, + UDMA_RX +}; + +/** UDMA state */ +enum al_udma_state { + UDMA_DISABLE = 0, + UDMA_IDLE, + UDMA_NORMAL, + UDMA_ABORT, + UDMA_RESET +}; + +extern const char *const al_udma_states_name[]; + +/** UDMA Q specific parameters from upper layer */ +struct al_udma_q_params { + uint32_t size; /**< ring size (in descriptors), submission and + * completion rings must have same size + */ + union al_udma_desc *desc_base; /**< cpu address for submission ring + * descriptors + */ + al_phys_addr_t desc_phy_base; /**< submission ring descriptors + * physical base address + */ + uint8_t *cdesc_base; /**< completion descriptors pointer, NULL */ + /* means no completion update */ + al_phys_addr_t cdesc_phy_base; /**< completion descriptors ring + * physical base address + */ + uint32_t cdesc_size; /**< size (in bytes) of a single dma completion + * descriptor + */ + + uint16_t dev_id; /**next_cdesc_idx - (udma_q->next_desc_idx + 1); + tmp &= udma_q->size_mask; + + return (uint32_t) tmp; +} + +/** + * check if queue has pending descriptors + * + * @param udma_q queue handle + * + * @return AL_TRUE if descriptors are submitted to completion ring and still + * not completed (with ack). AL_FALSE otherwise. + */ +static INLINE al_bool al_udma_is_empty(struct al_udma_q *udma_q) +{ + if (((udma_q->next_cdesc_idx - udma_q->next_desc_idx) & + udma_q->size_mask) == 0) + return AL_TRUE; + + return AL_FALSE; +} + +/** + * get next available descriptor + * @param udma_q queue handle + * + * @return pointer to the next available descriptor + */ +static INLINE union al_udma_desc *al_udma_desc_get(struct al_udma_q *udma_q) +{ + union al_udma_desc *desc; + uint16_t next_desc_idx; + + al_assert(udma_q); + + next_desc_idx = udma_q->next_desc_idx; + desc = udma_q->desc_base_ptr + next_desc_idx; + + next_desc_idx++; + + /* if reached end of queue, wrap around */ + udma_q->next_desc_idx = next_desc_idx & udma_q->size_mask; + + return desc; +} + +/** + * get ring id for the last allocated descriptor + * @param udma_q + * + * @return ring id for the last allocated descriptor + * this function must be called each time a new descriptor is allocated + * by the al_udma_desc_get(), unless ring id is ignored. + */ +static INLINE uint32_t al_udma_ring_id_get(struct al_udma_q *udma_q) +{ + uint32_t ring_id; + + al_assert(udma_q); + + ring_id = udma_q->desc_ring_id; + + /* calculate the ring id of the next desc */ + /* if next_desc points to first desc, then queue wrapped around */ + if (unlikely(udma_q->next_desc_idx) == 0) + udma_q->desc_ring_id = (udma_q->desc_ring_id + 1) & + DMA_RING_ID_MASK; + return ring_id; +} + +/* add DMA action - trigger the engine */ +/** + * add num descriptors to the submission queue. + * + * @param udma_q queue handle + * @param num number of descriptors to add to the queues ring. + * + * @return 0; + */ +static INLINE int al_udma_desc_action_add(struct al_udma_q *udma_q, + uint32_t num) +{ + uint32_t *addr; + + al_assert(udma_q); + al_assert((num > 0) && (num <= udma_q->size)); + + addr = &udma_q->q_regs->rings.drtp_inc; + /* make sure data written to the descriptors will be visible by the */ + /* DMA */ + al_local_data_memory_barrier(); + + /* + * As we explicitly invoke the synchronization function + * (al_data_memory_barrier()), then we can use the relaxed version. 
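+	 * I.e. the barrier above is what orders the descriptor writes ahead
+	 * of this doorbell write; the relaxed register write itself adds no
+	 * ordering.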
+	 */
+	al_reg_write32_relaxed(addr, num);
+
+	return 0;
+}
+
+#define cdesc_is_first(flags) ((flags) & AL_UDMA_CDESC_FIRST)
+#define cdesc_is_last(flags) ((flags) & AL_UDMA_CDESC_LAST)
+
+/**
+ * return a pointer to the cdesc + offset descriptors, wrapping around when
+ * needed.
+ *
+ * @param udma_q queue handle
+ * @param cdesc current completion descriptor pointer
+ * @param offset offset in descriptors
+ *
+ */
+static INLINE volatile union al_udma_cdesc *al_cdesc_next(
+	struct al_udma_q		*udma_q,
+	volatile union al_udma_cdesc	*cdesc,
+	uint32_t			offset)
+{
+	volatile uint8_t *tmp = (volatile uint8_t *) cdesc + offset * udma_q->cdesc_size;
+	al_assert(udma_q);
+	al_assert(cdesc);
+
+	/* if wrap around */
+	if (unlikely((tmp > udma_q->end_cdesc_ptr)))
+		return (union al_udma_cdesc *)
+			(udma_q->cdesc_base_ptr +
+			(tmp - udma_q->end_cdesc_ptr - udma_q->cdesc_size));
+
+	return (volatile union al_udma_cdesc *) tmp;
+}
+
+/**
+ * check if the flags of the descriptor indicate that it is a new one
+ * the function uses the ring id from the descriptor flags to know whether it
+ * is a new one by comparing it with the current ring id of the queue
+ *
+ * @param udma_q queue handle
+ * @param flags the flags of the completion descriptor
+ *
+ * @return AL_TRUE if the completion descriptor is a new one.
+ *	AL_FALSE if it is an old one.
+ */
+static INLINE al_bool al_udma_new_cdesc(struct al_udma_q *udma_q,
+								uint32_t flags)
+{
+	if (((flags & AL_M2S_DESC_RING_ID_MASK) >> AL_M2S_DESC_RING_ID_SHIFT)
+	    == udma_q->comp_ring_id)
+		return AL_TRUE;
+	return AL_FALSE;
+}
+
+/**
+ * get the next completion descriptor
+ * this function will also increment the completion ring id when the ring
+ * wraps around
+ *
+ * @param udma_q queue handle
+ * @param cdesc current completion descriptor
+ *
+ * @return pointer to the completion descriptor that follows the one pointed
+ *	to by cdesc
+ */
+static INLINE volatile union al_udma_cdesc *al_cdesc_next_update(
+	struct al_udma_q		*udma_q,
+	volatile union al_udma_cdesc	*cdesc)
+{
+	/* if last desc, wrap around */
+	if (unlikely(((volatile uint8_t *) cdesc == udma_q->end_cdesc_ptr))) {
+		udma_q->comp_ring_id =
+			(udma_q->comp_ring_id + 1) & DMA_RING_ID_MASK;
+		return (union al_udma_cdesc *) udma_q->cdesc_base_ptr;
+	}
+	return (volatile union al_udma_cdesc *) ((volatile uint8_t *) cdesc + udma_q->cdesc_size);
+}
+
+/**
+ * get the next completed packet from the completion ring of the queue
+ *
+ * @param udma_q udma queue handle
+ * @param desc pointer set by this function to the first descriptor
+ *	note: desc is valid only when the return value is not zero
+ * @return number of descriptors that belong to the packet. 0 means no
+ * completed full packet was found.
+ * If the descriptors found in the completion queue don't form a full packet
+ * (no desc with the LAST flag), then this function will do the following:
+ * (1) save the number of processed descriptors.
+ * (2) save the last processed descriptor, so the next time it is called, it
+ *     will resume from there.
+ * (3) return 0.
+ * note: the descriptors that belong to the completed packet will still be
+ * considered as used, which means the upper layer is safe to access those
+ * descriptors when this function returns. al_udma_cdesc_ack() should be
+ * called to inform the udma driver that those descriptors are freed.
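+ *
+ * Typical RX reaping loop (an illustrative sketch; rxq and the packet
+ * handling are assumed):
+ *
+ *	volatile union al_udma_cdesc *cdesc;
+ *	uint32_t ndescs = al_udma_cdesc_packet_get(rxq, &cdesc);
+ *
+ *	if (ndescs) {
+ *		process the ndescs descriptors starting at cdesc, then:
+ *		al_udma_cdesc_ack(rxq, ndescs);
+ *	}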
+ */ +uint32_t al_udma_cdesc_packet_get( + struct al_udma_q *udma_q, + volatile union al_udma_cdesc **desc); + +/** get completion descriptor pointer from its index */ +#define al_udma_cdesc_idx_to_ptr(udma_q, idx) \ + ((volatile union al_udma_cdesc *) ((udma_q)->cdesc_base_ptr + \ + (idx) * (udma_q)->cdesc_size)) + + +/** + * return number of all completed descriptors in the completion ring + * + * @param udma_q udma queue handle + * @param cdesc pointer that set by this function to the first descriptor + * note: desc is valid only when return value is not zero + * note: pass NULL if not interested + * @return number of descriptors. 0 means no completed descriptors were found. + * note: the descriptors that belong to the completed packet will still be + * considered as used, that means the upper layer is safe to access those + * descriptors when this function returns. the al_udma_cdesc_ack() should be + * called to inform the udma driver that those descriptors are freed. + */ +static INLINE uint32_t al_udma_cdesc_get_all( + struct al_udma_q *udma_q, + volatile union al_udma_cdesc **cdesc) +{ + uint16_t count = 0; + + al_assert(udma_q); + + udma_q->comp_head_idx = (uint16_t) + (al_reg_read32(&udma_q->q_regs->rings.crhp) & + 0xFFFF); + + count = (udma_q->comp_head_idx - udma_q->next_cdesc_idx) & + udma_q->size_mask; + + if (cdesc) + *cdesc = al_udma_cdesc_idx_to_ptr(udma_q, udma_q->next_cdesc_idx); + + return (uint32_t)count; +} + +/** + * acknowledge the driver that the upper layer completed processing completion + * descriptors + * + * @param udma_q udma queue handle + * @param num number of descriptors to acknowledge + * + * @return 0 + */ +static INLINE int al_udma_cdesc_ack(struct al_udma_q *udma_q, uint32_t num) +{ + al_assert(udma_q); + + udma_q->next_cdesc_idx += num; + udma_q->next_cdesc_idx &= udma_q->size_mask; + + return 0; +} + +/* *INDENT-OFF* */ +#ifdef __cplusplus +} +#endif +/* *INDENT-ON* */ + +#endif /* __AL_HAL_UDMA_H__ */ +/** @} end of UDMA group */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_config.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_config.h new file mode 100644 index 00000000000000..9ed5b8ea3d4b90 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_config.h @@ -0,0 +1,728 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_udma_config UDMA Config
+ * @ingroup group_udma_api
+ * UDMA Config API
+ * @{
+ * @file   al_hal_udma_config.h
+ *
+ * @brief C Header file for the Universal DMA HAL driver for configuration APIs
+ *
+ */
+
+#ifndef __AL_HAL_UDMA_CONFIG_H__
+#define __AL_HAL_UDMA_CONFIG_H__
+
+#include
+
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/** Scheduling mode */
+enum al_udma_sch_mode {
+	STRICT,			/* Strict */
+	SRR,			/* Simple Round Robin */
+	DWRR			/* Deficit Weighted Round Robin */
+};
+
+/** AXI configuration */
+struct al_udma_axi_conf {
+	uint32_t axi_timeout;	/* Timeout for AXI transactions */
+	uint8_t arb_promotion;	/* arbitration promotion */
+	al_bool swap_8_bytes;	/* enable 8 bytes swap instead of 4 bytes */
+	al_bool swap_s2m_data;
+	al_bool swap_s2m_desc;
+	al_bool swap_m2s_data;
+	al_bool swap_m2s_desc;
+};
+
+/** UDMA AXI M2S configuration */
+struct al_udma_axi_submaster {
+	uint8_t id; /* AXI ID */
+	uint8_t cache_type;
+	uint8_t burst;
+	uint16_t used_ext;
+	uint8_t bus_size;
+	uint8_t qos;
+	uint8_t prot;
+	uint8_t max_beats;
+};
+
+/** UDMA AXI M2S configuration */
+struct al_udma_m2s_axi_conf {
+	struct al_udma_axi_submaster comp_write;
+	struct al_udma_axi_submaster data_read;
+	struct al_udma_axi_submaster desc_read;
+	al_bool break_on_max_boundary; /* Data read break on max boundary */
+	uint8_t min_axi_beats; /* Minimum burst for writing completion desc. */
+	uint8_t ostand_max_data_read;
+	uint8_t ostand_max_desc_read;
+	uint8_t ostand_max_comp_req;
+	uint8_t ostand_max_comp_write;
+};
+
+/** UDMA AXI S2M configuration */
+struct al_udma_s2m_axi_conf {
+	struct al_udma_axi_submaster data_write;
+	struct al_udma_axi_submaster desc_read;
+	struct al_udma_axi_submaster comp_write;
+	al_bool break_on_max_boundary; /* Data read break on max boundary */
+	uint8_t min_axi_beats; /* Minimum burst for writing completion desc. */
+	uint8_t ostand_max_data_req;
+	uint8_t ostand_max_data_write;
+	uint8_t ostand_max_comp_req;
+	uint8_t ostand_max_comp_write;
+	uint8_t ostand_max_desc_read;
+	uint8_t ack_fifo_depth; /* size of the stream application ack fifo */
+};
+
+/** M2S error logging */
+struct al_udma_err_log {
+	uint32_t error_status;
+	uint32_t header[4];
+};
+
+/** M2S max packet size configuration */
+struct al_udma_m2s_pkt_len_conf {
+	uint32_t max_pkt_size;
+	al_bool encode_64k_as_zero;
+};
+
+/** M2S Descriptor Prefetch configuration */
+struct al_udma_m2s_desc_pref_conf {
+	uint8_t desc_fifo_depth;
+	enum al_udma_sch_mode sch_mode;	/* Scheduling mode
+					 * (either strict or RR)
+					 */
+
+	uint8_t max_desc_per_packet;	/* max number of descriptors to
+					 * prefetch in one burst (5b)
+					 */
+	uint8_t pref_thr;
+	uint8_t min_burst_above_thr;	/* min burst size when fifo above
+					 * pref_thr (4b)
+					 */
+	uint8_t min_burst_below_thr;	/* min burst size when fifo below
+					 * pref_thr (4b)
+					 */
+	uint8_t max_pkt_limit;		/* maximum number of packets in the data
+					 * read FIFO, defined based on header
+					 * FIFO size
+					 */
+	uint16_t data_fifo_depth;	/* maximum number of data beats in the
+					 * data read FIFO,
+					 * defined based on header FIFO size
+					 */
+};
+
+/** S2M Descriptor Prefetch configuration */
+struct al_udma_s2m_desc_pref_conf {
+	uint8_t desc_fifo_depth;
+	enum al_udma_sch_mode sch_mode;	/* Scheduling mode
+					 * (either strict or RR)
+					 */
+
+	al_bool q_promotion;		/* enable promotion */
+	al_bool force_promotion;	/* force promotion */
+	al_bool en_pref_prediction;	/* enable prefetch prediction */
+	uint8_t promotion_th;		/* Threshold for queue promotion */
+
+	uint8_t pref_thr;
+	uint8_t min_burst_above_thr;	/* min burst size when fifo above
+					 * pref_thr (4b)
+					 */
+	uint8_t min_burst_below_thr;	/* min burst size when fifo below
+					 * pref_thr (4b)
+					 */
+	uint8_t a_full_thr;		/* almost full threshold */
+};
+
+/** S2M Data write configuration */
+struct al_udma_s2m_data_write_conf {
+	uint16_t data_fifo_depth;	/* maximum number of data beats in the
+					 * data write FIFO, defined based on
+					 * header FIFO size
+					 */
+	uint8_t max_pkt_limit;		/* maximum number of packets in the
+					 * data write FIFO, defined based on
+					 * header FIFO size
+					 */
+	uint8_t fifo_margin;
+	uint32_t desc_wait_timer;	/* waiting time for the host to write
+					 * a new descriptor to the queue
+					 * (for the current packet in process)
+					 */
+	uint32_t flags;			/* bitwise of flags of the s2m
+					 * data_cfg_2 register
+					 */
+};
+
+/** S2M Completion configuration */
+struct al_udma_s2m_completion_conf {
+	uint8_t desc_size;		/* Size of the completion descriptor
+					 * in words
+					 */
+	al_bool cnt_words;		/* Completion fifo in use counter:
+					 * AL_TRUE words, AL_FALSE descriptors
+					 */
+	al_bool q_promotion;		/* Enable promotion of the current
+					 * unack in progress in the completion
+					 * write scheduler
+					 */
+	al_bool force_rr;		/* force RR arbitration in the
+					 * scheduler
+					 */
+	// uint8_t ack_fifo_depth; /* size of the stream application ack fifo */
+	uint8_t q_free_min;		/* minimum number of free completion
+					 * entries to qualify for promotion
+					 */
+
+	uint16_t comp_fifo_depth;	/* Size of completion fifo in words */
+	uint16_t unack_fifo_depth;	/* Size of unacked fifo in descs */
+	uint32_t timeout;		/* Ack timeout from the stream interface */
+};
+
+/** M2S UDMA DWRR configuration */
+struct al_udma_m2s_dwrr_conf {
+	al_bool enable_dwrr;
+	uint8_t inc_factor;
+	uint8_t weight;
+	al_bool pkt_mode;
+	uint32_t deficit_init_val;
+};
+
+/** M2S DMA Rate Limitation mode */
+struct 
al_udma_m2s_rlimit_mode { + al_bool pkt_mode_en; + uint16_t short_cycle_sz; + uint32_t token_init_val; +}; + +/** M2S Stream/Q Rate Limitation */ +struct al_udma_m2s_rlimit_cfg { + uint32_t max_burst_sz; /* maximum number of accumulated bytes in the + * token counter + */ + uint16_t long_cycle_sz; /* number of short cycles between token fill */ + uint32_t long_cycle; /* number of bits to add in each long cycle */ + uint32_t short_cycle; /* number of bits to add in each cycle */ + uint32_t mask; /* mask the different types of rate limiters */ +}; + +enum al_udma_m2s_rlimit_action { + AL_UDMA_STRM_RLIMIT_ENABLE, + AL_UDMA_STRM_RLIMIT_PAUSE, + AL_UDMA_STRM_RLIMIT_RESET +}; + +/** M2S UDMA Q scheduling configuration */ +struct al_udma_m2s_q_dwrr_conf { + uint32_t max_deficit_cnt_sz; /*maximum number of accumulated bytes + * in the deficit counter + */ + al_bool strict; /* bypass DWRR */ + uint8_t axi_qos; + uint16_t q_qos; + uint8_t weight; +}; + +/** M2S UDMA / UDMA Q scheduling configuration */ +struct al_udma_m2s_sc { + enum al_udma_sch_mode sch_mode; /* Scheduling Mode */ + struct al_udma_m2s_dwrr_conf dwrr; /* DWRR configuration */ +}; + +/** UDMA / UDMA Q rate limitation configuration */ +struct al_udma_m2s_rlimit { + struct al_udma_m2s_rlimit_mode rlimit_mode; + /* rate limitation enablers */ +#if 0 + struct al_udma_tkn_bkt_conf token_bkt; /* Token Bucket configuration */ +#endif +}; + +/** UDMA Data read configuration */ +struct al_udma_m2s_data_rd_conf { + uint8_t max_rd_d_beats; /* max burst size for reading data + * (in AXI beats-128b) (5b) + */ + uint8_t max_rd_d_out_req; /* max number of outstanding data + * read requests (6b) + */ + uint16_t max_rd_d_out_beats; /* max num. of data read beats (10b) */ +}; + +/** M2S UDMA completion and application timeouts */ +struct al_udma_m2s_comp_timeouts { + enum al_udma_sch_mode sch_mode; /* Scheduling mode + * (either strict or RR) + */ + al_bool enable_q_promotion; + uint8_t unack_fifo_depth; /* unacked desc fifo size */ + uint8_t comp_fifo_depth; /* desc fifo size */ + uint32_t coal_timeout; /* (24b) */ + uint32_t app_timeout; /* (24b) */ +}; + +/** S2M UDMA per queue completion configuration */ +struct al_udma_s2m_q_comp_conf { + al_bool dis_comp_coal; /* disable completion coalescing */ + al_bool en_comp_ring_update; /* enable writing completion descs */ + uint32_t comp_timer; /* completion coalescing timer */ + al_bool en_hdr_split; /* enable header split */ + al_bool force_hdr_split; /* force header split */ + uint16_t hdr_split_size; /* size used for the header split */ + uint8_t q_qos; /* queue QoS */ +}; + +/** UDMA per queue VMID control configuration */ +struct al_udma_gen_vmid_q_conf { + /* Enable usage of the VMID per queue according to 'vmid' */ + al_bool queue_en; + + /* Enable usage of the VMID from the descriptor buffer address 63:48 */ + al_bool desc_en; + + /* VMID to be applied when 'queue_en' is asserted */ + uint16_t vmid; +}; + +/** UDMA VMID control configuration */ +struct al_udma_gen_vmid_conf { + /* TX queue configuration */ + struct al_udma_gen_vmid_q_conf tx_q_conf[DMA_MAX_Q]; + + /* RX queue configuration */ + struct al_udma_gen_vmid_q_conf rx_q_conf[DMA_MAX_Q]; +}; + +/** UDMA VMID MSIX control configuration */ +struct al_udma_gen_vmid_msix_conf { + /* Enable write to all VMID_n registers in the MSI-X Controller */ + al_bool access_en; + + /* use VMID_n [7:0] from MSI-X Controller for MSI-X message */ + al_bool sel; +}; + +/** UDMA per Tx queue advanced VMID control configuration */ +struct 
al_udma_gen_vmid_advanced_tx_q_conf { + /********************************************************************** + * Tx Data VMID + **********************************************************************/ + /* Tx data VMID enable */ + al_bool tx_q_data_vmid_en; + + /* + * For Tx data reads, replacement bits for the original address. + * The number of bits replaced is determined according to + * 'tx_q_addr_hi_sel' + */ + unsigned int tx_q_addr_hi; + + /* + * For Tx data reads, 6 bits serving the number of bits taken from the + * extra register on account of bits coming from the original address + * field. + * When 'tx_q_addr_hi_sel'=32 all of 'tx_q_addr_hi' will be taken. + * When 'tx_q_addr_hi_sel'=0 none of it will be taken, and when any + * value in between, it will start from the MSB bit and sweep down as + * many bits as needed. For example if 'tx_q_addr_hi_sel'=8, the final + * address [63:56] will carry 'tx_q_addr_hi'[31:24] while [55:32] will + * carry the original buffer address[55:32]. + */ + unsigned int tx_q_addr_hi_sel; + + /* + * Tx data read VMID + * Masked per bit with 'tx_q_data_vmid_mask' + */ + unsigned int tx_q_data_vmid; + + /* + * Tx data read VMID mask + * Each '1' selects from the buffer address, each '0' selects from + * 'tx_q_data_vmid' + */ + unsigned int tx_q_data_vmid_mask; + + /********************************************************************** + * Tx prefetch VMID + **********************************************************************/ + /* Tx prefetch VMID enable */ + al_bool tx_q_prefetch_vmid_en; + + /* Tx prefetch VMID */ + unsigned int tx_q_prefetch_vmid; + + /********************************************************************** + * Tx completion VMID + **********************************************************************/ + /* Tx completion VMID enable */ + al_bool tx_q_compl_vmid_en; + + /* Tx completion VMID */ + unsigned int tx_q_compl_vmid; +}; + +/** UDMA per Rx queue advanced VMID control configuration */ +struct al_udma_gen_vmid_advanced_rx_q_conf { + /********************************************************************** + * Rx Data VMID + **********************************************************************/ + /* Rx data VMID enable */ + al_bool rx_q_data_vmid_en; + + /* + * For Rx data writes, replacement bits for the original address. + * The number of bits replaced is determined according to + * 'rx_q_addr_hi_sel' + */ + unsigned int rx_q_addr_hi; + + /* + * For Rx data writes, 6 bits serving the number of bits taken from the + * extra register on account of bits coming from the original address + * field. + */ + unsigned int rx_q_addr_hi_sel; + + /* + * Rx data write VMID + * Masked per bit with 'rx_q_data_vmid_mask' + */ + unsigned int rx_q_data_vmid; + + /* Rx data write VMID mask */ + unsigned int rx_q_data_vmid_mask; + + /********************************************************************** + * Rx Data Buffer 2 VMID + **********************************************************************/ + /* Rx data buff2 VMID enable */ + al_bool rx_q_data_buff2_vmid_en; + + /* + * For Rx data buff2 writes, replacement bits for the original address. + * The number of bits replaced is determined according to + * 'rx_q_data_buff2_addr_hi_sel' + */ + unsigned int rx_q_data_buff2_addr_hi; + + /* + * For Rx data buff2 writes, 6 bits serving the number of bits taken + * from the extra register on account of bits coming from the original + * address field. 
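+	 * The semantics follow 'tx_q_addr_hi_sel' above: 32 selects all of
+	 * 'rx_q_data_buff2_addr_hi', 0 selects none of it, and values in
+	 * between replace that many MSBs of the original address.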
+ */ + unsigned int rx_q_data_buff2_addr_hi_sel; + + /* + * Rx data buff2 write VMID + * Masked per bit with 'rx_q_data_buff2_mask' + */ + unsigned int rx_q_data_buff2_vmid; + + /* Rx data buff2 write VMID mask */ + unsigned int rx_q_data_buff2_mask; + + /********************************************************************** + * Rx DDP VMID + **********************************************************************/ + /* Rx DDP write VMID enable */ + al_bool rx_q_ddp_vmid_en; + + /* + * For Rx DDP writes, replacement bits for the original address. + * The number of bits replaced is determined according to + * 'rx_q_ddp_addr_hi_sel' + */ + unsigned int rx_q_ddp_addr_hi; + + /* + * For Rx DDP writes, 6 bits serving the number of bits taken from the + * extra register on account of bits coming from the original address + * field. + */ + unsigned int rx_q_ddp_addr_hi_sel; + + /* + * Rx DDP write VMID + * Masked per bit with 'rx_q_ddp_mask' + */ + unsigned int rx_q_ddp_vmid; + + /* Rx DDP write VMID mask */ + unsigned int rx_q_ddp_mask; + + /********************************************************************** + * Rx prefetch VMID + **********************************************************************/ + /* Rx prefetch VMID enable */ + al_bool rx_q_prefetch_vmid_en; + + /* Rx prefetch VMID */ + unsigned int rx_q_prefetch_vmid; + + /********************************************************************** + * Rx completion VMID + **********************************************************************/ + /* Rx completion VMID enable */ + al_bool rx_q_compl_vmid_en; + + /* Rx completion VMID */ + unsigned int rx_q_compl_vmid; +}; + +/** + * Header split, buffer 2 per queue configuration + * When header split is enabled, Buffer_2 is used as an address for the header + * data. Buffer_2 is defined as 32-bits in the RX descriptor and it is defined + * that the MSB ([63:32]) of Buffer_1 is used as address [63:32] for the header + * address. 
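+ *
+ * For example (illustrative): with 'add_msb_sel' = 0xF the header is
+ * written to {addr_msb, Buffer_2}, while 'add_msb_sel' = 0x0 keeps the
+ * legacy {Buffer_1[63:32], Buffer_2} behavior.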
+ */ +struct al_udma_gen_hdr_split_buff2_q_conf { + /* + * MSB of the 64-bit address (bits [63:32]) that can be used for header + * split for this queue + */ + unsigned int addr_msb; + + /* + * Determine how to select the MSB (bits [63:32]) of the address when + * header split is enabled (4 bits, one per byte) + * - Bits [3:0]: + * [0] – selector for bits [39:32] + * [1] – selector for bits [47:40] + * [2] – selector for bits [55:48] + * [3] – selector for bits [63:55] + * - Bit value: + * 0 – Use Buffer_1 (legacy operation) + * 1 – Use the queue configuration 'addr_msb' + */ + unsigned int add_msb_sel; +}; + +/* Report Error - to be used for abort */ +void al_udma_err_report(struct al_udma *udma); + +/* Statistics - TBD */ +void al_udma_stats_get(struct al_udma *udma); + +/* Misc configurations */ +/* Configure AXI configuration */ +int al_udma_axi_set(struct udma_gen_axi *axi_regs, + struct al_udma_axi_conf *axi); + +/* Configure UDMA AXI M2S configuration */ +int al_udma_m2s_axi_set(struct al_udma *udma, + struct al_udma_m2s_axi_conf *axi_m2s); + +/* Configure UDMA AXI S2M configuration */ +int al_udma_s2m_axi_set(struct al_udma *udma, + struct al_udma_s2m_axi_conf *axi_s2m); + +/* Configure M2S packet len */ +int al_udma_m2s_packet_size_cfg_set(struct al_udma *udma, + struct al_udma_m2s_pkt_len_conf *conf); + +/* Configure M2S UDMA descriptor prefetch */ +int al_udma_m2s_pref_set(struct al_udma *udma, + struct al_udma_m2s_desc_pref_conf *conf); +int al_udma_m2s_pref_get(struct al_udma *udma, + struct al_udma_m2s_desc_pref_conf *conf); + +/* set m2s packet's max descriptors (including meta descriptors) */ +#define AL_UDMA_M2S_MAX_ALLOWED_DESCS_PER_PACKET 31 +int al_udma_m2s_max_descs_set(struct al_udma *udma, uint8_t max_descs); + +/* set s2m packets' max descriptors */ +#define AL_UDMA_S2M_MAX_ALLOWED_DESCS_PER_PACKET 31 +int al_udma_s2m_max_descs_set(struct al_udma *udma, uint8_t max_descs); + + +/* Configure S2M UDMA descriptor prefetch */ +int al_udma_s2m_pref_set(struct al_udma *udma, + struct al_udma_s2m_desc_pref_conf *conf); +int al_udma_m2s_pref_get(struct al_udma *udma, + struct al_udma_m2s_desc_pref_conf *conf); + +/* Configure S2M UDMA data write */ +int al_udma_s2m_data_write_set(struct al_udma *udma, + struct al_udma_s2m_data_write_conf *conf); + +/* Configure the s2m full line write feature */ +int al_udma_s2m_full_line_write_set(struct al_udma *umda, al_bool enable); + +/* Configure S2M UDMA completion */ +int al_udma_s2m_completion_set(struct al_udma *udma, + struct al_udma_s2m_completion_conf *conf); + +/* Configure the M2S UDMA scheduling mode */ +int al_udma_m2s_sc_set(struct al_udma *udma, + struct al_udma_m2s_dwrr_conf *sched); + +/* Configure the M2S UDMA rate limitation */ +int al_udma_m2s_rlimit_set(struct al_udma *udma, + struct al_udma_m2s_rlimit_mode *mode); +int al_udma_m2s_rlimit_reset(struct al_udma *udma); + +/* Configure the M2S Stream rate limitation */ +int al_udma_m2s_strm_rlimit_set(struct al_udma *udma, + struct al_udma_m2s_rlimit_cfg *conf); +int al_udma_m2s_strm_rlimit_act(struct al_udma *udma, + enum al_udma_m2s_rlimit_action act); + +/* Configure the M2S UDMA Q rate limitation */ +int al_udma_m2s_q_rlimit_set(struct al_udma_q *udma_q, + struct al_udma_m2s_rlimit_cfg *conf); +int al_udma_m2s_q_rlimit_act(struct al_udma_q *udma_q, + enum al_udma_m2s_rlimit_action act); + +/* Configure the M2S UDMA Q scheduling mode */ +int al_udma_m2s_q_sc_set(struct al_udma_q *udma_q, + struct al_udma_m2s_q_dwrr_conf *conf); +int al_udma_m2s_q_sc_pause(struct 
al_udma_q *udma_q, al_bool set);
+int al_udma_m2s_q_sc_reset(struct al_udma_q *udma_q);
+
+/* M2S UDMA completion and application timeouts */
+int al_udma_m2s_comp_timeouts_set(struct al_udma *udma,
+		struct al_udma_m2s_comp_timeouts *conf);
+int al_udma_m2s_comp_timeouts_get(struct al_udma *udma,
+		struct al_udma_m2s_comp_timeouts *conf);
+
+
+/**
+ * S2M UDMA Configure the expected behavior of the Rx/S2M UDMA when there are no Rx Descriptors.
+ *
+ * @param udma
+ * @param drop_packet when set to true, the UDMA will drop the packet.
+ * @param gen_interrupt when set to true, the UDMA will generate a
+ *        no_desc_hint interrupt when a packet is received and the UDMA
+ *	  doesn't find enough free descriptors for it.
+ * @param wait_for_desc_timeout timeout in SB cycles to wait for new
+ *	  descriptors before dropping the packets.
+ *	  Notes:
+ *		- The hint interrupt is raised immediately without waiting
+ *		for new descs.
+ *		- value 0 means wait forever.
+ *
+ * Notes:
+ * - When gen_interrupt is set, the API won't program the iofic to unmask this
+ *   interrupt; in this case the caller should take care of unmasking it
+ *   using the al_udma_iofic_config() API.
+ *
+ * - The hardware's default configuration is: no drop packet, generate hint
+ *   interrupt.
+ * - This API must be called once and before enabling the UDMA
+ *
+ * @return 0 if no error found.
+ */
+int al_udma_s2m_no_desc_cfg_set(struct al_udma *udma, al_bool drop_packet, al_bool gen_interrupt, uint32_t wait_for_desc_timeout);
+
+/**
+ * S2M UDMA configure a queue's completion update
+ *
+ * @param udma_q
+ * @param enable set to true to enable completion update
+ *
+ * Completion update should be disabled for tx queues, as those descriptors
+ * don't carry useful information; disabling it saves DMA accesses.
+ *
+ * @return 0 if no error found.
+ */
+int al_udma_s2m_q_compl_updade_config(struct al_udma_q *udma_q, al_bool enable);
+
+/**
+ * S2M UDMA configure a queue's completion descriptors coalescing
+ *
+ * @param udma_q
+ * @param enable set to true to enable completion coalescing
+ * @param coal_timeout in South Bridge cycles.
+ *
+ * @return 0 if no error found.
+ */
+int al_udma_s2m_q_compl_coal_config(struct al_udma_q *udma_q, al_bool enable, uint32_t coal_timeout);
+
+/**
+ * S2M UDMA configure completion descriptors write burst parameters
+ *
+ * @param udma
+ * @param burst_size completion descriptors write burst size in bytes.
+ *
+ * @return 0 if no error found.
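+ *
+ * For example (illustrative): a 64 byte burst with 16 byte completion
+ * descriptors lets the engine write four completion descriptors per burst.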
+ */
+int al_udma_s2m_compl_desc_burst_config(struct al_udma *udma,
+		uint16_t burst_size);
+
+/* S2M UDMA per queue completion configuration */
+int al_udma_s2m_q_comp_set(struct al_udma_q *udma_q,
+		struct al_udma_s2m_q_comp_conf *conf);
+
+/** UDMA VMID control configuration */
+void al_udma_gen_vmid_conf_set(
+	struct unit_regs __iomem *unit_regs,
+	struct al_udma_gen_vmid_conf *conf);
+
+/** UDMA VMID MSIX control configuration */
+void al_udma_gen_vmid_msix_conf_set(
+	struct unit_regs __iomem *unit_regs,
+	struct al_udma_gen_vmid_msix_conf *conf);
+
+/** UDMA VMID control advanced Tx queue configuration */
+void al_udma_gen_vmid_advanced_tx_q_conf(
+	struct al_udma_q *q,
+	struct al_udma_gen_vmid_advanced_tx_q_conf *conf);
+
+/** UDMA VMID control advanced Rx queue configuration */
+void al_udma_gen_vmid_advanced_rx_q_conf(
+	struct al_udma_q *q,
+	struct al_udma_gen_vmid_advanced_rx_q_conf *conf);
+
+/** UDMA header split buffer 2 Rx queue configuration */
+void al_udma_gen_hdr_split_buff2_rx_q_conf(
+	struct al_udma_q *q,
+	struct al_udma_gen_hdr_split_buff2_q_conf *conf);
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of UDMA config group */
+#endif /* __AL_HAL_UDMA_CONFIG_H__ */
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_debug.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_debug.h
new file mode 100644
index 00000000000000..3a06958979790a
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_debug.h
@@ -0,0 +1,133 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +*******************************************************************************/ + +/** + * @defgroup group_udma_debug UDMA Debug + * @ingroup group_udma_api + * UDMA Debug + * @{ + * @file al_hal_udma_debug.h + * + * @brief C Header file for the Universal DMA HAL driver for debug APIs + * + */ + +#ifndef __AL_HAL_UDMA_DEBUG_H__ +#define __AL_HAL_UDMA_DEBUG_H__ + +#include + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +/* UDMA register print helper macros */ +#define AL_UDMA_PRINT_REG(UDMA, PREFIX, POSTFIX, TYPE, GROUP, REG) \ + al_dbg(PREFIX #REG " = 0x%08x" POSTFIX, al_reg_read32( \ + &(UDMA->udma_regs->TYPE.GROUP.REG))) + +#define AL_UDMA_PRINT_REG_FIELD( \ + UDMA, PREFIX, POSTFIX, FMT, TYPE, GROUP, REG, LBL, FIELD) \ + al_dbg(PREFIX #LBL " = " FMT POSTFIX, al_reg_read32( \ + &(UDMA->udma_regs->TYPE.GROUP.REG)) \ + & FIELD ## _MASK >> FIELD ## _SHIFT) + +#define AL_UDMA_PRINT_REG_BIT( \ + UDMA, PREFIX, POSTFIX, TYPE, GROUP, REG, LBL, FIELD) \ + al_dbg(PREFIX #LBL " = %d" POSTFIX, ((al_reg_read32( \ + &(UDMA->udma_regs->TYPE.GROUP.REG)) \ + & FIELD) != 0)) + +/* UDMA register print mask definitions */ +#define AL_UDMA_DEBUG_QUEUE(n) AL_BIT(n) +#define AL_UDMA_DEBUG_AXI AL_BIT(DMA_MAX_Q) +#define AL_UDMA_DEBUG_GENERAL AL_BIT(DMA_MAX_Q + 1) +#define AL_UDMA_DEBUG_READ AL_BIT(DMA_MAX_Q + 2) +#define AL_UDMA_DEBUG_WRITE AL_BIT(DMA_MAX_Q + 3) +#define AL_UDMA_DEBUG_DWRR AL_BIT(DMA_MAX_Q + 4) +#define AL_UDMA_DEBUG_RATE_LIMITER AL_BIT(DMA_MAX_Q + 5) +#define AL_UDMA_DEBUG_STREAM_RATE_LIMITER AL_BIT(DMA_MAX_Q + 6) +#define AL_UDMA_DEBUG_COMP AL_BIT(DMA_MAX_Q + 7) +#define AL_UDMA_DEBUG_STAT AL_BIT(DMA_MAX_Q + 8) +#define AL_UDMA_DEBUG_FEATURE AL_BIT(DMA_MAX_Q + 9) +#define AL_UDMA_DEBUG_ALL 0xFFFFFFFF + +/* Debug functions */ + +/** + * Print udma registers according to the provided mask + * + * @param udma udma data structure + * @param mask mask that specifies which registers groups to print + * e.g. AL_UDMA_DEBUG_AXI prints AXI registers, AL_UDMA_DEBUG_ALL prints all + * registers + */ +void al_udma_regs_print(struct al_udma *udma, unsigned int mask); + +/** + * Print udma queue software structure + * + * @param udma udma data structure + * @param qid queue index + */ +void al_udma_q_struct_print(struct al_udma *udma, uint32_t qid); + +/** UDMA ring type */ +enum al_udma_ring_type { + AL_RING_SUBMISSION, + AL_RING_COMPLETION +}; + +/** + * Print the ring entries for the specified queue index and ring type + * (submission/completion) + * + * @param udma udma data structure + * @param qid queue index + * @param rtype udma ring type + */ +void al_udma_ring_print(struct al_udma *udma, uint32_t qid, + enum al_udma_ring_type rtype); + + +/* *INDENT-OFF* */ +#ifdef __cplusplus +} +#endif +/* *INDENT-ON* */ +#endif /* __AL_HAL_UDMA_DEBUG_H__ */ +/** @} end of UDMA debug group */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_fast.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_fast.h new file mode 100644 index 00000000000000..1b7c102a45b007 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_fast.h @@ -0,0 +1,231 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. 
+ +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @defgroup group_udma_fast UDMA Fast API + * + ** @{ + * The UDMA Fast API can be used to perform simple operations by directly modifying + * the UDMA descriptors instead of passing via HAL SW structures and functions. + * This allows to achieve optimal performance for those operations. + * + * Currently RAID controller and MEMCOPY and PARALLEL MEMCOPY operations are + * supported. 
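+ *
+ * Illustrative fast-mode memcpy flow (a sketch with assumed names txq, rxq,
+ * src_pa, dst_pa, len and idx; queue initialization and completion handling
+ * are omitted):
+ *
+ *	struct al_memcpy_transaction xaction = { 0 };
+ *	union al_udma_desc *txd, *rxd;
+ *
+ *	al_udma_fast_memcpy_q_prepare(txq, rxq, &xaction);
+ *
+ *	txd = al_udma_fast_desc_get_by_idx(txq, idx);
+ *	rxd = al_udma_fast_desc_get_by_idx(rxq, idx);
+ *	al_udma_fast_desc_buf_set(txd, src_pa, 0);
+ *	al_udma_fast_desc_len_set(txd, len);
+ *	al_udma_fast_desc_buf_set(rxd, dst_pa, 0);
+ *	al_udma_fast_desc_len_set(rxd, len);
+ *	al_udma_desc_action_add(txq, 1);
+ *	al_udma_desc_action_add(rxq, 1);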
+ * + * @file al_hal_udma_fast.h + * + * @brief Header file for UDMA Fast API + * + */ + +#ifndef __AL_HAL_UDMA_FAST_H__ +#define __AL_HAL_UDMA_FAST_H__ + +#include +#include +#include + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +/** + * Prepare MEMCPY udma queues to work in fast mode - init all the descriptors + * according to opcode and flags passed in xaction + * + * @param udma_txq udma tx queue handle + * @param udma_rxq udma rx queue handle + * @param xaction transaction context + * + * @return 0 if no error found + */ +int al_udma_fast_memcpy_q_prepare(struct al_udma_q *udma_txq, + struct al_udma_q *udma_rxq, + struct al_memcpy_transaction *xaction); + +/** + * Get udma descriptor by index in queue + * + * @param udma_q udma queue handle + * @param index descriptor index + * + * @return udma descriptor handle + */ +static INLINE union al_udma_desc *al_udma_fast_desc_get_by_idx(struct al_udma_q *udma_q, + uint32_t index) +{ + union al_udma_desc *desc; + + al_assert(udma_q); + al_assert(index < udma_q->size); + + desc = udma_q->desc_base_ptr + index; + return desc; +} + +/* Work with tx desc structures as buf_ptr, flags and len fields are in same + * location for tx and rx descs + */ +/** + * Set udma descriptor buffer address + * + * @param desc udma descriptor handle + * @param buf_ptr buffer adddress + * @param vmid virtual machine id + */ +static inline void al_udma_fast_desc_buf_set(union al_udma_desc *desc, + al_phys_addr_t buf_ptr, uint16_t vmid) +{ + al_assert(desc); + + desc->tx.buf_ptr = swap64_to_le(buf_ptr | vmid); +} + +/** + * Set udma descriptor flags specified by flags param and mask, while keeping + * flags that are not specified by the mask + * + * @param desc udma descriptor handle + * @param flags flags + * @param mask flags mask + */ +static inline void al_udma_fast_desc_flags_set(union al_udma_desc *desc, + uint32_t flags, uint32_t mask) +{ + uint32_t flags_len; + + al_assert(desc); + + flags_len = swap32_from_le(desc->tx.len_ctrl); + mask &= ~AL_M2S_DESC_LEN_MASK; + flags_len &= ~mask; + flags_len |= flags; + desc->tx.len_ctrl = swap32_to_le(flags_len); +} + +/** + * Set udma descriptor ring id + * + * @param desc udma descriptor handle + * @param ring_id ring id + */ +static inline void al_udma_fast_desc_ring_id_set(union al_udma_desc *desc, + uint32_t ring_id) +{ + uint32_t flags_len; + + al_assert(desc); + + flags_len = swap32_from_le(desc->tx.len_ctrl); + flags_len &= ~AL_M2S_DESC_RING_ID_MASK; + flags_len |= ring_id << AL_M2S_DESC_RING_ID_SHIFT; + desc->tx.len_ctrl = swap32_to_le(flags_len); +} + +/** + * Set udma descriptor buffer length + * + * @param desc udma descriptor handle + * @param len buffer length + */ +static inline void al_udma_fast_desc_len_set(union al_udma_desc *desc, + uint16_t len) +{ + uint32_t flags_len; + + al_assert(desc); + + flags_len = swap32_from_le(desc->tx.len_ctrl); + flags_len &= ~AL_M2S_DESC_LEN_MASK; + flags_len |= len; + desc->tx.len_ctrl = swap32_to_le(flags_len); +} + +/** + * Get up to desc_to_complete completed descriptors + * + * If use_head is set to AL TRUE head register is used to determine number of + * completed descriptors and reg read is performed on every poll operation. + * Otherwise each completion descriptor is read to determine whether it is + * completed. If HW cache coherency is used there's no cache miss until the + * descriptor is completed. 
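+ *
+ * E.g. (illustrative) al_udma_fast_completion(rxq, 16, AL_FALSE) reaps up
+ * to 16 descriptors by reading the completion descriptors themselves,
+ * without touching the head register.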
+ * + * @param udma_rxq udma rx queue handle + * @param descs_to_complete max number of completed descriptors to get + * @param use_head poll head register instead of completion descriptors + */ +static inline int al_udma_fast_completion(struct al_udma_q *udma_rxq, + uint32_t descs_to_complete, al_bool use_head) +{ + volatile union al_udma_cdesc *curr; + uint32_t cdesc_count = 0; + + if (!use_head) { + curr = udma_rxq->comp_head_ptr; + while (descs_to_complete != 0) { + uint32_t comp_flags; + + comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta); + if (al_udma_new_cdesc(udma_rxq, comp_flags) == AL_FALSE) + break; + + cdesc_count++; + descs_to_complete--; + curr = al_cdesc_next_update(udma_rxq, curr); + } + + udma_rxq->comp_head_ptr = curr; + } else { + cdesc_count = al_udma_cdesc_get_all(udma_rxq, NULL); + + if (cdesc_count > descs_to_complete) + cdesc_count = descs_to_complete; + } + + if (cdesc_count) + al_udma_cdesc_ack(udma_rxq, cdesc_count); + + return cdesc_count; +} + +/* *INDENT-OFF* */ +#ifdef __cplusplus +} +#endif +/* *INDENT-ON* */ +/** @} end of UDMA Fast group */ +#endif /* __AL_HAL_UDMA_FAST_H__ */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_iofic.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_iofic.h new file mode 100644 index 00000000000000..12cda6ae62a82a --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_iofic.h @@ -0,0 +1,613 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_udma_interrupts UDMA I/O Fabric Interrupt Controller
+ * @ingroup group_udma_api
+ *  UDMA IOFIC API
+ *  @{
+ *      @file  al_hal_udma_iofic.h
+ *
+ *      @brief C Header file for programming the interrupt controller that is
+ *      found in UDMA-based units. These APIs rely on and use some of the
+ *      Interrupt controller API under al_hal_iofic.h
+ */
+
+#ifndef __AL_HAL_UDMA_IOFIC_H__
+#define __AL_HAL_UDMA_IOFIC_H__
+
+#include
+#include
+#include
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/**
+ * Interrupt Mode
+ * This is the interrupt mode for the primary interrupt level. The secondary
+ * interrupt level does not have a mode; it is always a level sensitive
+ * interrupt that is reflected in group D of the primary.
+ */
+enum al_iofic_mode {
+	AL_IOFIC_MODE_LEGACY,		/**< level-sensitive interrupt wire */
+	AL_IOFIC_MODE_MSIX_PER_Q,	/**< per UDMA queue MSI-X interrupt */
+	AL_IOFIC_MODE_MSIX_PER_GROUP
+};
+
+/** interrupt controller level (primary/secondary) */
+enum al_udma_iofic_level {
+	AL_UDMA_IOFIC_LEVEL_PRIMARY,
+	AL_UDMA_IOFIC_LEVEL_SECONDARY
+};
+
+/*
+ * The next four groups represent the standard 4 groups in the primary
+ * interrupt controller of each bus-master unit in the I/O Fabric.
+ * The first two groups can be used when accessing the secondary interrupt
+ * controller as well.
+ */
+#define AL_INT_GROUP_A		0 /**< summary of the below events */
+#define AL_INT_GROUP_B		1 /**< RX completion queues */
+#define AL_INT_GROUP_C		2 /**< TX completion queues */
+#define AL_INT_GROUP_D		3 /**< Misc */
+
+/*******************************************************************************
+ * Primary interrupt controller, group A bits
+ ******************************************************************************/
+/* Group A bits which are just summary bits of GROUP B, C and D */
+#define AL_INT_GROUP_A_GROUP_B_SUM	AL_BIT(0)
+#define AL_INT_GROUP_A_GROUP_C_SUM	AL_BIT(1)
+#define AL_INT_GROUP_A_GROUP_D_SUM	AL_BIT(2)
+
+/*******************************************************************************
+ * MSIX entry indices
+ ******************************************************************************/
+/** MSIX entry index for summary of group D in group A */
+#define AL_INT_MSIX_GROUP_A_SUM_D_IDX	2
+/** MSIX entry index for RX completion queue 0 */
+#define AL_INT_MSIX_RX_COMPLETION_START	3
+
+/*******************************************************************************
+ * Primary interrupt controller, group D bits
+ ******************************************************************************/
+#define AL_INT_GROUP_D_CROSS_MAIL_BOXES	\
+	(AL_BIT(0) | AL_BIT(1) | AL_BIT(2) | AL_BIT(3))
+/** Summary of secondary interrupt controller, group A */
+#define AL_INT_GROUP_D_M2S		AL_BIT(8)
+/** Summary of secondary interrupt controller, group B */
+#define AL_INT_GROUP_D_S2M		AL_BIT(9)
+#define AL_INT_GROUP_D_SW_TIMER_INT	AL_BIT(10)
+#define AL_INT_GROUP_D_APP_EXT_INT	AL_BIT(11)
+#define AL_INT_GROUP_D_ALL \
+	(AL_INT_GROUP_D_CROSS_MAIL_BOXES | \
+	AL_INT_GROUP_D_M2S | \
+	AL_INT_GROUP_D_S2M | \
+	AL_INT_GROUP_D_SW_TIMER_INT | \
+	AL_INT_GROUP_D_APP_EXT_INT)
+
+/*
+ * Until this point, all of the description above is for Groups A/B/C/D in the
+ * PRIMARY interrupt controller.
+ * Following are definitions related to the secondary interrupt controller,
+ * which has two cause registers (group A and group B) that cover UDMA M2S/S2M
+ * errors.
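+ *
+ * An illustrative dispatch sketch (assumes the primary group A cause was
+ * already read into 'cause_a' and that 'regs' points to the unit registers;
+ * read_m2s_errors() is a hypothetical helper, not part of this API):
+ *
+ *	if (cause_a & AL_INT_GROUP_A_GROUP_D_SUM) {
+ *		uint32_t cause_d = al_udma_iofic_read_cause(regs,
+ *				AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D);
+ *		if (cause_d & AL_INT_GROUP_D_M2S)
+ *			read_m2s_errors();	// secondary group A holds the bits
+ *	}
+ *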
+ * Secondary interrupt controller summary bits are not mapped to the Processor + * GIC directly, rather they are represented in Group D of the primary interrupt + * controller. + */ + +/****************************************************************************** + * Secondary interrupt Controller, Group A, which holds the TX (M2S) error + * interrupt bits + ******************************************************************************/ + +/** + * MSIx response + * MSIX Bus generator response error, the Bus response received with error indication + */ +#define AL_INT_2ND_GROUP_A_M2S_MSIX_RESP AL_BIT(27) +/** + * MSIx timeout MSIX Bus generator timeout error. + * The generator didn't receive bus response for the MSIx write transaction. + */ +#define AL_INT_2ND_GROUP_A_M2S_MSIX_TO AL_BIT(26) +/** Prefetch header buffer parity error */ +#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_HDR_PARITY AL_BIT(25) +/** Prefetch descriptor buffer parity error */ +#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_DESC_PARITY AL_BIT(24) +/** Data buffer parity error */ +#define AL_INT_2ND_GROUP_A_M2S_DATA_PARITY AL_BIT(23) +/** Data header buffer parity error */ +#define AL_INT_2ND_GROUP_A_M2S_HDR_PARITY AL_BIT(22) +/** Completion coalescing buffer parity error */ +#define AL_INT_2ND_GROUP_A_M2S_COMPL_COAL_PARITY AL_BIT(21) +/** UNACK packets buffer parity error */ +#define AL_INT_2ND_GROUP_A_M2S_UNACK_PKT_PARITY AL_BIT(20) +/** ACK packets buffer parity error */ +#define AL_INT_2ND_GROUP_A_M2S_ACK_PKT_PARITY AL_BIT(19) +/** AXI data buffer parity error */ +#define AL_INT_2ND_GROUP_A_M2S_AX_DATA_PARITY AL_BIT(18) +/** + * Prefetch Ring ID error + * A wrong RingId was received while prefetching submission descriptor. This + * could indicate a software bug or hardware failure, unless the UDMA is + * working in a mode to ignore RingId (the al_udma_iofic_config() API can be + * used to configure the UDMA to ignore the Ring ID check) + */ +#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_RING_ID AL_BIT(17) +/** + * Prefetch last + * Error in last bit indication of the descriptor + * Descriptor with Last bit asserted is read from the queue to the prefetch + * FIFO when the prefetch engine is not in a middle of packet processing (a + * descriptor with First bit asserted should be read first to indicate start of + * packet) + */ +#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_LAST AL_BIT(16) +/** + * Prefetch first + * Error in first bit indication of the descriptor + * Descriptor with First bit asserted is read from the queue to the prefetch + * FIFO while the prefetch engine is in a middle of packet processing ( a + * descriptor with Last bit asserted should be read to indicate end of packet + * before starting a new one) + */ +#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_FIRST AL_BIT(15) +/** + * Prefetch max descriptors + * Number of descriptors per packet exceeds the configurable maximum + * descriptors per packet. This could indicate a software bug or a hardware + * failure. (The al_udma_m2s_max_descs_set() API is used to configure the + * maximum descriptors per packet) + */ +#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_MAX_DESC AL_BIT(14) +/** + * Packet length + * Packet length exceeds the configurable maximum packet size. 
The + * al_udma_m2s_packet_size_cfg_set() API is used to configure the maximum + * packet size) + */ +#define AL_INT_2ND_GROUP_A_M2S_PKT_LEN AL_BIT(13) +/** + * Prefetch AXI timeout + * Bus request to I/O Fabric timeout error + */ +#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_AXI_TO AL_BIT(12) +/** + * Prefetch AXI response + * Bus response from I/O Fabric error + */ +#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_AXI_RESP AL_BIT(11) +/** + * Prefetch AXI parity + * Bus parity error on descriptor being prefetched + */ +#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_AXI_PARITY AL_BIT(10) +/** + * Data AXI timeout + * Bus request to I/O Fabric timeout error + */ +#define AL_INT_2ND_GROUP_A_M2S_DATA_AXI_TO AL_BIT(9) +/** + * Data AXI response + * Bus response from I/O Fabric error + */ +#define AL_INT_2ND_GROUP_A_M2S_DATA_AXI_RESP AL_BIT(8) +/** + * Data AXI parity + * Bus parity error on data being read + */ +#define AL_INT_2ND_GROUP_A_M2S_SATA_AXI_PARITY AL_BIT(7) +/** + * Completion AXI timeout + * Bus request to I/O Fabric timeout error + */ +#define AL_INT_2ND_GROUP_A_M2S_CONPL_AXI_TO AL_BIT(6) +/** + * Completion AXI response + * Bus response from I/O Fabric error + */ +#define AL_INT_2ND_GROUP_A_M2S_COMPL_AXI_RESP AL_BIT(5) +/** + * Completion AXI parity + * Bus generator internal SRAM parity error + */ +#define AL_INT_2ND_GROUP_A_M2S_COMP_AXI_PARITY AL_BIT(4) +/** + * Stream timeout + * Application stream interface timeout indicating a failure at the Application + * layer (RAID, Ethernet etc) + */ +#define AL_INT_2ND_GROUP_A_M2S_STRM_TO AL_BIT(3) +/** + * Stream response + * Application stream interface response error indicating a failure at the + * Application layer (RAID, Ethernet etc) + */ +#define AL_INT_2ND_GROUP_A_M2S_STRM_RESP AL_BIT(2) +/** + * Stream parity + * Application stream interface parity error indicating a failure at the + * Application layer (RAID, Ethernet etc) + */ +#define AL_INT_2ND_GROUP_A_M2S_STRM_PARITY AL_BIT(1) +/** + * Stream completion mismatch + * Application stream interface, packet serial mismatch error indicating a + * failure at the Application layer (RAID, Ethernet etc) + */ +#define AL_INT_2ND_GROUP_A_M2S_STRM_COMPL_MISMATCH AL_BIT(0) + +/******************************************************************************* + * Secondary interrupt Controller, Group B, which holds the RX (S2M) error + * interrupt bits + ******************************************************************************/ + +/** Prefetch descriptor buffer parity error */ +#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_DESC_PARITY AL_BIT(30) +/** Completion coalescing buffer parity error */ +#define AL_INT_2ND_GROUP_B_S2M_COMPL_COAL_PARITY AL_BIT(29) +/** PRE-UNACK packets buffer parity error */ +#define AL_INT_2ND_GROUP_B_S2M_PRE_UNACK_PKT_PARITY AL_BIT(28) +/** UNACK packets buffer parity error */ +#define AL_INT_2ND_GROUP_B_S2M_UNACK_PKT_PARITY AL_BIT(27) +/** Data buffer parity error */ +#define AL_INT_2ND_GROUP_B_S2M_DATA_PARITY AL_BIT(26) +/** Data header buffer parity error */ +#define AL_INT_2ND_GROUP_B_S2M_DATA_HDR_PARITY AL_BIT(25) +/** + * Packet length + * Application stream interface, Data counter length mismatch with metadata + * packet length indicating a failure at the Application layer (RAID, Ethernet + * etc) + */ +#define AL_INT_2ND_GROUP_B_S2M_PKT_LEN AL_BIT(24) +/** + * Stream last + * Application stream interface, error in Last bit indication, this error is + * asserted when a 'last' indication is asserted on the stream interface + * (between the application and the UDMA) when the 
interface is not in the + * middle of packet, meaning that there was no 'first' indication before. This + * indicates a failure at the application layer. + */ +#define AL_INT_2ND_GROUP_B_S2M_STRM_LAST AL_BIT(23) +/** + * Stream first + * Application stream interface error in first bit indication, this error is + * asserted when a 'first' indication is asserted on the stream interface + * (between the application and the UDMA) when the interface is in the middle + * of packet, meaning that there was a 'first' indication before and the UDMA + * is waiting for a 'last' indication to end the packet. This indicates a + * failure at the application layer. + */ +#define AL_INT_2ND_GROUP_B_S2M_STRM_FIRST AL_BIT(22) +/** + * Stream data + * Application stream interface, error indication during data transaction + */ +#define AL_INT_2ND_GROUP_B_S2M_STRM_DATA AL_BIT(21) +/** + * Stream Data parity + * Application stream interface, parity error during data transaction + */ +#define AL_INT_2ND_GROUP_B_S2M_STRM_DATA_PARITY AL_BIT(20) +/** + * Stream Header error + * Application stream interface, error indication during header transaction + */ +#define AL_INT_2ND_GROUP_B_S2M_STRM_HDR AL_BIT(19) +/** + * Stream Header parity + * Application stream interface, parity error during header transaction + */ +#define AL_INT_2ND_GROUP_B_S2M_STRM_HDR_PARITY AL_BIT(18) +/** + * Completion UNACK + * Completion write, UNACK timeout due to completion FIFO back pressure + */ +#define AL_INT_2ND_GROUP_B_S2M_COMPL_UNACK AL_BIT(17) +/** + * Completion stream + * Completion write, UNACK timeout due to stream ACK FIFO back pressure + */ +#define AL_INT_2ND_GROUP_B_S2M_COMPL_STRM AL_BIT(16) +/** + * Completion AXI timeout + * Bus request to I/O Fabric timeout error + */ +#define AL_INT_2ND_GROUP_B_S2M_COMPL_AXI_TO AL_BIT(15) +/** + * Completion AXI response + * Bus response from I/O Fabric error + */ +#define AL_INT_2ND_GROUP_B_S2M_COMPL_AXI_RESP AL_BIT(14) +/** + * Completion AXI parity + * Completion Bus generator internal SRAM parity error + */ +#define AL_INT_2ND_GROUP_B_S2M_COMPL_AXI_PARITY AL_BIT(13) +/** + * Prefetch saturate + * Prefetch engine, packet length counter saturated (32 bit) , this is caused + * by an error at the application layer which sends packet data without + * 'last'/'first' indication. + */ +#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_SAT AL_BIT(12) +/** + * Prefetch ring ID + * Prefetch engine, Ring ID is not matching the expected RingID. This could + * indicate a software bug or hardware failure, unless the UDMA is working in a + * mode to ignore RingId (the al_udma_iofic_config() API can be used to + * configure the UDMA to ignore the Ring ID check) + */ +#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_RING_ID AL_BIT(11) +/** + * Prefetch AXI timeout + * Bus request to I/O Fabric timeout error + */ +#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_AXI_TO AL_BIT(10) +/** + * Prefetch AXI response + * Bus response from I/O Fabric error + */ +#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_AXI_RESP AL_BIT(9) +/** + * Prefetch AXI parity + * Bus parity error on descriptor being prefetched + */ +#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_AXI_PARITY AL_BIT(8) +/** + * No descriptors hint + * Data write, Hint to the SW that there are not enough descriptors in the + * queue for the current received packet. This is considered a hint and not an + * error, as it could be a normal situation in certain application. 
The S2M
+ * UDMA behavior when it runs out of Rx Descriptors is controlled by the
+ * driver, which can use this hint to add more descriptors to the Rx queue.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_NO_DESC_HINT	AL_BIT(7)
+/**
+ * No descriptors timeout
+ * Data write, Timeout indication when there are not enough descriptors for the
+ * current packet and the timeout expires. The S2M UDMA behavior when it runs
+ * out of Rx Descriptors is controlled by the driver, which can use this hint
+ * to add more descriptors to the Rx queue. The al_udma_s2m_no_desc_cfg_set()
+ * API is used to configure the UDMA S2M timeout and behavior when there are
+ * no Rx descriptors for the received packet.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_NO_DESC_TO	AL_BIT(6)
+/**
+ * Promotion indication
+ * Data write, the data write engine checks the queue number of the two packets
+ * at the head of the data FIFO, and notifies the prefetch engine to promote
+ * these queue numbers in the prefetch scheduler to make sure that these queues
+ * will have RX descriptors for these packets. This error indicates that the
+ * prefetch promotion didn't work for the second packet in the FIFO. This is an
+ * indication used for system debug and not an error.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PROM_IND	AL_BIT(5)
+/**
+ * Header split ignored
+ * Data write, the application requested header split but the buffer descriptor
+ * doesn't include a second buffer for the header
+ */
+#define AL_INT_2ND_GROUP_B_S2M_HDR_SPLT_IGNORED	AL_BIT(4)
+/**
+ * Header split length
+ * Data write, the application requested header split and the length of the
+ * second buffer allocated for the header is not enough for the requested
+ * header length. The remainder of the header is written to buffer 1 (data
+ * buffer).
+ */
+#define AL_INT_2ND_GROUP_B_S2M_HDR_SPLT_LEN	AL_BIT(3)
+/**
+ * Data AXI timeout
+ * Bus request to I/O Fabric timeout error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_DATA_AXI_TO	AL_BIT(2)
+/**
+ * Data AXI response
+ * Bus response from I/O Fabric error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_DATA_AXI_RESP	AL_BIT(1)
+/**
+ * Data AXI parity
+ * Bus parity error on data being read
+ */
+#define AL_INT_2ND_GROUP_B_S2M_DATA_AXI_PARITY	AL_BIT(0)
+
+/*******************************************************************************
+ * Configurations
+ ******************************************************************************/
+
+/**
+ * Configure the UDMA interrupt controller registers; interrupts are kept
+ * masked.
+ * This is a static setting that should be applied while initializing the
+ * interrupt controller within a given UDMA, and should not be modified during
+ * runtime unless the UDMA is completely disabled. The first argument sets the
+ * interrupt and MSI-X modes. The m2s/s2m errors/aborts arguments are a set of
+ * bit-wise masks that define the behaviour of the UDMA once an error happens:
+ * the _aborts mask puts the UDMA in abort state once an error happens, while
+ * the _errors bitmask indicates an error in the secondary cause register but
+ * does not abort. The bit-masks used by the _errors_disable and
+ * _aborts_disable parameters are described in 'AL_INT_2ND_GROUP_A_*' and
+ * 'AL_INT_2ND_GROUP_B_*'.
+ *
+ * @param regs pointer to unit registers
+ * @param mode interrupt scheme mode (legacy, MSI-X..)
+ * @param m2s_errors_disable
+ *	This is a bit-wise mask, to indicate which one of the error causes in
+ *	secondary interrupt group_A should generate an interrupt. When a bit is
+ *	set, the error cause is ignored.
+ *	Recommended value: 0 (enable all errors).
+ * @param m2s_aborts_disable
+ *	This is a bit-wise mask, to indicate which one of the error causes in
+ *	secondary interrupt group_A should automatically put the UDMA in
+ *	abort state. When a bit is set, the error cause does not cause an abort.
+ *	Recommended value: 0 (enable all aborts).
+ * @param s2m_errors_disable
+ *	This is a bit-wise mask, to indicate which one of the error causes in
+ *	secondary interrupt group_B should generate an interrupt. When a bit is
+ *	set, the error cause is ignored.
+ *	Recommended value: 0xE0 (disable hint errors).
+ * @param s2m_aborts_disable
+ *	This is a bit-wise mask, to indicate which one of the error causes in
+ *	secondary interrupt group_B should automatically put the UDMA in
+ *	abort state. When a bit is set, the error cause does not cause an abort.
+ *	Recommended value: 0xE0 (disable hint aborts).
+ *
+ * @return 0 on success. -EINVAL otherwise.
+ */
+int al_udma_iofic_config(struct unit_regs __iomem *regs,
+	enum al_iofic_mode mode,
+	uint32_t m2s_errors_disable,
+	uint32_t m2s_aborts_disable,
+	uint32_t s2m_errors_disable,
+	uint32_t s2m_aborts_disable);
+/**
+ * Return the offset of the unmask register for a given group.
+ * This function can be used when the upper layer wants to directly
+ * access the unmask register and bypass the al_udma_iofic_unmask() API.
+ *
+ * @param regs pointer to udma registers
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ * @return the offset of the unmask register.
+ */
+uint32_t __iomem *al_udma_iofic_unmask_offset_get(
+	struct unit_regs __iomem *regs,
+	enum al_udma_iofic_level level,
+	int group);
+
+/**
+ * Get the interrupt controller base address for either the primary or
+ * secondary interrupt controller
+ *
+ * @param regs pointer to udma unit registers
+ * @param level the interrupt controller level (primary / secondary)
+ *
+ * @returns	The interrupt controller base address
+ *
+ */
+static INLINE void __iomem *al_udma_iofic_reg_base_get(
+	struct unit_regs __iomem *regs,
+	enum al_udma_iofic_level level)
+{
+	void __iomem *iofic_regs = (level == AL_UDMA_IOFIC_LEVEL_PRIMARY) ?
+		(void __iomem *)&regs->gen.interrupt_regs.main_iofic :
+		(void __iomem *)&regs->gen.interrupt_regs.secondary_iofic_ctrl;
+
+	return iofic_regs;
+}
+
+/**
+ * Check the interrupt controller level/group validity
+ *
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ *
+ * @returns	0 - invalid, 1 - valid
+ *
+ */
+static INLINE int al_udma_iofic_level_and_group_valid(
+	enum al_udma_iofic_level level,
+	int group)
+{
+	if (((level == AL_UDMA_IOFIC_LEVEL_PRIMARY) && (group >= 0) && (group < 4)) ||
+		((level == AL_UDMA_IOFIC_LEVEL_SECONDARY) && (group >= 0) && (group < 2)))
+		return 1;
+
+	return 0;
+}
+/**
+ * Unmask specific interrupts for a given group.
+ * This function uses the interrupt mask clear register to guarantee atomicity;
+ * it's safe to call it while the mask is changed by the HW (auto mask) or
+ * another cpu.
+ *
+ * @param regs pointer to udma unit registers
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ * @param mask bitwise of interrupts to unmask, set bits will be unmasked.
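+ *
+ * Example (illustrative; 'regs' is the UDMA unit registers pointer, and the
+ * chosen bit - the queue 0 RX completion summary in group B - is arbitrary):
+ *
+ *	al_udma_iofic_unmask(regs, AL_UDMA_IOFIC_LEVEL_PRIMARY,
+ *			AL_INT_GROUP_B, AL_BIT(0));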
+ */
+static INLINE void al_udma_iofic_unmask(
+	struct unit_regs __iomem *regs,
+	enum al_udma_iofic_level level,
+	int group,
+	uint32_t mask)
+{
+	al_assert(al_udma_iofic_level_and_group_valid(level, group));
+	al_iofic_unmask(al_udma_iofic_reg_base_get(regs, level), group, mask);
+}
+
+/**
+ * Mask specific interrupts for a given group.
+ * This function modifies the interrupt mask register; the caller must make
+ * sure the mask is not changed by another cpu.
+ *
+ * @param regs pointer to udma unit registers
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ * @param mask bitwise of interrupts to mask, set bits will be masked.
+ */
+static INLINE void al_udma_iofic_mask(
+	struct unit_regs __iomem *regs,
+	enum al_udma_iofic_level level,
+	int group,
+	uint32_t mask)
+{
+	al_assert(al_udma_iofic_level_and_group_valid(level, group));
+	al_iofic_mask(al_udma_iofic_reg_base_get(regs, level), group, mask);
+}
+
+/**
+ * Read the interrupt cause register for a given group.
+ * This will clear the set bits if Clear-on-Read mode is enabled.
+ * @param regs pointer to udma unit registers
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ */
+static INLINE uint32_t al_udma_iofic_read_cause(
+	struct unit_regs __iomem *regs,
+	enum al_udma_iofic_level level,
+	int group)
+{
+	al_assert(al_udma_iofic_level_and_group_valid(level, group));
+	return al_iofic_read_cause(al_udma_iofic_reg_base_get(regs, level), group);
+}
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of UDMA group */
+#endif /* __AL_HAL_UDMA_IOFIC_H__ */
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_iofic_regs.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_iofic_regs.h
new file mode 100644
index 00000000000000..802b6590e0c75d
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_iofic_regs.h
@@ -0,0 +1,65 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __AL_HAL_UDMA_IOFIC_REG_H
+#define __AL_HAL_UDMA_IOFIC_REG_H
+
+#include
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** This structure covers all interrupt registers of a given UDMA. It is built
+ * of an al_iofic_regs, which is the common I/O Fabric Interrupt Controller
+ * (IOFIC), plus two additional interrupt groups dedicated to the
+ * application-specific engine attached to the UDMA; the interrupt summary
+ * of those two groups is routed to group D of the main controller.
+ */
+struct udma_iofic_regs {
+	struct al_iofic_regs main_iofic;
+	uint32_t rsrvd1[(0x1c00) >> 2];
+	struct al_iofic_grp_ctrl secondary_iofic_ctrl[2];
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_UDMA_IOFIC_REG_H */
+
+
+
+
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_regs.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_regs.h
new file mode 100644
index 00000000000000..15895a9febb604
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_regs.h
@@ -0,0 +1,99 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file   al_hal_udma_regs.h
+ *
+ * @brief udma registers definition
+ *
+ *
+ */
+#ifndef __AL_HAL_UDMA_REG_H
+#define __AL_HAL_UDMA_REG_H
+
+#include
+#include
+#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** UDMA registers, either m2s or s2m */
+union udma_regs {
+	struct udma_m2s_regs m2s;
+	struct udma_s2m_regs s2m;
+};
+
+struct unit_regs {
+	struct udma_m2s_regs m2s;
+	uint32_t rsrvd0[(0x10000 - sizeof(struct udma_m2s_regs)) >> 2];
+	struct udma_s2m_regs s2m;
+	uint32_t rsrvd1[((0x1C000 - 0x10000) - sizeof(struct udma_s2m_regs)) >> 2];
+	struct udma_gen_regs gen;
+};
+
+/** UDMA submission and completion registers; M2S and S2M UDMAs have the same
+ * structure */
+struct udma_rings_regs {
+	uint32_t rsrvd0[8];
+	uint32_t cfg;		/* Descriptor ring configuration */
+	uint32_t status;	/* Descriptor ring status and information */
+	uint32_t drbp_low;	/* Descriptor Ring Base Pointer [31:4] */
+	uint32_t drbp_high;	/* Descriptor Ring Base Pointer [63:32] */
+	uint32_t drl;		/* Descriptor Ring Length [23:2] */
+	uint32_t drhp;		/* Descriptor Ring Head Pointer */
+	uint32_t drtp_inc;	/* Descriptor Tail Pointer increment */
+	uint32_t drtp;		/* Descriptor Tail Pointer */
+	uint32_t dcp;		/* Descriptor Current Pointer */
+	uint32_t crbp_low;	/* Completion Ring Base Pointer [31:4] */
+	uint32_t crbp_high;	/* Completion Ring Base Pointer [63:32] */
+	uint32_t crhp;		/* Completion Ring Head Pointer */
+	uint32_t crhp_internal;	/* Completion Ring Head Pointer internal, before AX ... */
+};
+
+/** M2S and S2M generic structure of Q registers */
+union udma_q_regs {
+	struct udma_rings_regs rings;
+	struct udma_m2s_q m2s_q;
+	struct udma_s2m_q s2m_q;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_UDMA_REG_H */
+/** @} end of UDMA group */
diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_regs_gen.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_regs_gen.h
new file mode 100644
index 00000000000000..067138339a2b5d
--- /dev/null
+++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_regs_gen.h
@@ -0,0 +1,413 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @file al_hal_udma_regs_gen.h + * + * @brief C Header file for the UDMA general registers + * + */ + +#ifndef __AL_HAL_UDMA_GEN_REG_H +#define __AL_HAL_UDMA_GEN_REG_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif +/* +* Unit Registers +*/ + + + +struct udma_gen_dma_misc { + /* [0x0] Reserved register for the interrupt controller */ + uint32_t int_cfg; + /* [0x4] Revision register */ + uint32_t revision; + /* [0x8] Reserved for future use */ + uint32_t general_cfg_1; + /* [0xc] Reserved for future use */ + uint32_t general_cfg_2; + /* [0x10] Reserved for future use */ + uint32_t general_cfg_3; + /* [0x14] Reserved for future use */ + uint32_t general_cfg_4; + /* [0x18] General timer configuration */ + uint32_t general_cfg_5; + uint32_t rsrvd[57]; +}; +struct udma_gen_mailbox { + /* + * [0x0] Mailbox interrupt generator. + * Generates interrupt to neighbor DMA + */ + uint32_t interrupt; + /* [0x4] Mailbox message data out */ + uint32_t msg_out; + /* [0x8] Mailbox message data in */ + uint32_t msg_in; + uint32_t rsrvd[13]; +}; +struct udma_gen_axi { + /* [0x0] Configuration of the AXI masters */ + uint32_t cfg_1; + /* [0x4] Configuration of the AXI masters */ + uint32_t cfg_2; + /* [0x8] Configuration of the AXI masters. 
Endianness configuration */
+	uint32_t endian_cfg;
+	uint32_t rsrvd[61];
+};
+struct udma_gen_sram_ctrl {
+	/* [0x0] Timing configuration */
+	uint32_t timing;
+};
+struct udma_gen_vmid {
+	/* [0x0] VMID control */
+	uint32_t cfg_vmid_0;
+	/* [0x4] TX queue 0/1 VMID */
+	uint32_t cfg_vmid_1;
+	/* [0x8] TX queue 2/3 VMID */
+	uint32_t cfg_vmid_2;
+	/* [0xc] RX queue 0/1 VMID */
+	uint32_t cfg_vmid_3;
+	/* [0x10] RX queue 2/3 VMID */
+	uint32_t cfg_vmid_4;
+};
+struct udma_gen_vmaddr {
+	/* [0x0] TX queue 0/1 VMADDR */
+	uint32_t cfg_vmaddr_0;
+	/* [0x4] TX queue 2/3 VMADDR */
+	uint32_t cfg_vmaddr_1;
+	/* [0x8] RX queue 0/1 VMADDR */
+	uint32_t cfg_vmaddr_2;
+	/* [0xc] RX queue 2/3 VMADDR */
+	uint32_t cfg_vmaddr_3;
+};
+struct udma_gen_vmpr {
+	/* [0x0] TX VMPR control */
+	uint32_t cfg_vmpr_0;
+	/* [0x4] TX VMPR Address High Register */
+	uint32_t cfg_vmpr_1;
+	/* [0x8] TX queue VMID values */
+	uint32_t cfg_vmpr_2;
+	/* [0xc] TX queue VMID values */
+	uint32_t cfg_vmpr_3;
+	/* [0x10] RX VMPR control */
+	uint32_t cfg_vmpr_4;
+	/* [0x14] RX VMPR Buffer2 MSB address */
+	uint32_t cfg_vmpr_5;
+	/* [0x18] RX queue VMID values */
+	uint32_t cfg_vmpr_6;
+	/* [0x1c] RX queue BUF1 VMID values */
+	uint32_t cfg_vmpr_7;
+	/* [0x20] RX queue BUF2 VMID values */
+	uint32_t cfg_vmpr_8;
+	/* [0x24] RX queue Direct Data Placement VMID values */
+	uint32_t cfg_vmpr_9;
+	/* [0x28] RX VMPR BUF1 Address High Register */
+	uint32_t cfg_vmpr_10;
+	/* [0x2c] RX VMPR BUF2 Address High Register */
+	uint32_t cfg_vmpr_11;
+	/* [0x30] RX VMPR DDP Address High Register */
+	uint32_t cfg_vmpr_12;
+	uint32_t rsrvd[3];
+};
+
+struct udma_gen_regs {
+	struct udma_iofic_regs interrupt_regs;		/* [0x0000] */
+	struct udma_gen_dma_misc dma_misc;		/* [0x2080] */
+	struct udma_gen_mailbox mailbox[4];		/* [0x2180] */
+	struct udma_gen_axi axi;			/* [0x2280] */
+	struct udma_gen_sram_ctrl sram_ctrl[25];	/* [0x2380] */
+	uint32_t rsrvd_1[2];
+	struct udma_gen_vmid vmid;			/* [0x23ec] */
+	struct udma_gen_vmaddr vmaddr;			/* [0x2400] */
+	uint32_t rsrvd_2[252];
+	struct udma_gen_vmpr vmpr[4];			/* [0x2800] */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** int_cfg register ****/
+/*
+ * MSIX data width
+ * 1 - 64 bit
+ * 0 - 32 bit
+ */
+#define UDMA_GEN_DMA_MISC_INT_CFG_MSIX_64 (1 << 0)
+/* General configuration */
+#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_3_1_MASK 0x0000000E
+#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_3_1_SHIFT 1
+/* MSIx AXI QoS */
+#define UDMA_GEN_DMA_MISC_INT_CFG_MSIX_AXI_QOS_MASK 0x00000070
+#define UDMA_GEN_DMA_MISC_INT_CFG_MSIX_AXI_QOS_SHIFT 4
+
+#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_31_7_MASK 0xFFFFFF80
+#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_31_7_SHIFT 7
+
+/**** revision register ****/
+/* Design programming interface revision ID */
+#define UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_MASK 0x00000FFF
+#define UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_SHIFT 0
+/* Design minor revision ID */
+#define UDMA_GEN_DMA_MISC_REVISION_MINOR_ID_MASK 0x00FFF000
+#define UDMA_GEN_DMA_MISC_REVISION_MINOR_ID_SHIFT 12
+/* Design major revision ID */
+#define UDMA_GEN_DMA_MISC_REVISION_MAJOR_ID_MASK 0xFF000000
+#define UDMA_GEN_DMA_MISC_REVISION_MAJOR_ID_SHIFT 24
+
+/**** Interrupt register ****/
+/* Generate interrupt to another DMA */
+#define UDMA_GEN_MAILBOX_INTERRUPT_SET (1 << 0)
+
+/**** cfg_2 register ****/
+/*
+ * Enable arbitration promotion.
+ * Increment master priority after configured number of arbitration cycles + */ +#define UDMA_GEN_AXI_CFG_2_ARB_PROMOTION_MASK 0x0000000F +#define UDMA_GEN_AXI_CFG_2_ARB_PROMOTION_SHIFT 0 + +/**** endian_cfg register ****/ +/* Swap M2S descriptor read and completion descriptor write. */ +#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DESC (1 << 0) +/* Swap M2S data read. */ +#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DATA (1 << 1) +/* Swap S2M descriptor read and completion descriptor write. */ +#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DESC (1 << 2) +/* Swap S2M data write. */ +#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DATA (1 << 3) +/* + * Swap 32 or 64 bit mode: + * 0 - Swap groups of 4 bytes + * 1 - Swap groups of 8 bytes + */ +#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_64B_EN (1 << 4) + +/**** timing register ****/ +/* Write margin */ +#define UDMA_GEN_SRAM_CTRL_TIMING_RMA_MASK 0x0000000F +#define UDMA_GEN_SRAM_CTRL_TIMING_RMA_SHIFT 0 +/* Write margin enable */ +#define UDMA_GEN_SRAM_CTRL_TIMING_RMEA (1 << 8) +/* Read margin */ +#define UDMA_GEN_SRAM_CTRL_TIMING_RMB_MASK 0x000F0000 +#define UDMA_GEN_SRAM_CTRL_TIMING_RMB_SHIFT 16 +/* Read margin enable */ +#define UDMA_GEN_SRAM_CTRL_TIMING_RMEB (1 << 24) + +/**** cfg_vmid_0 register ****/ +/* For M2S queues 3:0, enable usage of the VMID from the buffer address 63:56 */ +#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_MASK 0x0000000F +#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_SHIFT 0 +/* + * For M2S queues 3:0, enable usage of the VMID from the configuration register + * (cfg_vmid_1/2 used for M2S queue_x) + */ +#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_MASK 0x000000F0 +#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_SHIFT 4 +/* use VMID_n [7:0] from MSI-X Controller for MSI-X message */ +#define UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_SEL (1 << 8) +/* Enable write to all VMID_n registers in the MSI-X Controller */ +#define UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_ACCESS_EN (1 << 9) +/* For S2M queues 3:0, enable usage of the VMID from the buffer address 63:56 */ +#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_MASK 0x000F0000 +#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_SHIFT 16 +/* + * For S2M queues 3:0, enable usage of the VMID from the configuration register + * (cfg_vmid_3/4 used for M2S queue_x) + */ +#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_MASK 0x00F00000 +#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_SHIFT 20 + +/**** cfg_vmid_1 register ****/ +/* TX queue 0 VMID value */ +#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_0_VMID_MASK 0x0000FFFF +#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_0_VMID_SHIFT 0 +/* TX queue 1 VMID value */ +#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_1_VMID_MASK 0xFFFF0000 +#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_1_VMID_SHIFT 16 + +/**** cfg_vmid_2 register ****/ +/* TX queue 2 VMID value */ +#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_2_VMID_MASK 0x0000FFFF +#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_2_VMID_SHIFT 0 +/* TX queue 3 VMID value */ +#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_3_VMID_MASK 0xFFFF0000 +#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_3_VMID_SHIFT 16 + +/**** cfg_vmid_3 register ****/ +/* RX queue 0 VMID value */ +#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_0_VMID_MASK 0x0000FFFF +#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_0_VMID_SHIFT 0 +/* RX queue 1 VMID value */ +#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_1_VMID_MASK 0xFFFF0000 +#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_1_VMID_SHIFT 16 + +/**** cfg_vmid_4 register ****/ +/* RX queue 2 VMID value */ +#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_2_VMID_MASK 0x0000FFFF +#define 
UDMA_GEN_VMID_CFG_VMID_4_RX_Q_2_VMID_SHIFT 0 +/* RX queue 3 VMID value */ +#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_3_VMID_MASK 0xFFFF0000 +#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_3_VMID_SHIFT 16 + +/**** cfg_vmaddr_0 register ****/ +/* TX queue 0 VMADDR value */ +#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_0_VMADDR_MASK 0x0000FFFF +#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_0_VMADDR_SHIFT 0 +/* TX queue 1 VMADDR value */ +#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_1_VMADDR_MASK 0xFFFF0000 +#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_1_VMADDR_SHIFT 16 + +/**** cfg_vmaddr_1 register ****/ +/* TX queue 2 VMADDR value */ +#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_2_VMADDR_MASK 0x0000FFFF +#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_2_VMADDR_SHIFT 0 +/* TX queue 3 VMADDR value */ +#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_3_VMADDR_MASK 0xFFFF0000 +#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_3_VMADDR_SHIFT 16 + +/**** cfg_vmaddr_2 register ****/ +/* RX queue 0 VMADDR value */ +#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_0_VMADDR_MASK 0x0000FFFF +#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_0_VMADDR_SHIFT 0 +/* RX queue 1 VMADDR value */ +#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_1_VMADDR_MASK 0xFFFF0000 +#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_1_VMADDR_SHIFT 16 + +/**** cfg_vmaddr_3 register ****/ +/* RX queue 2 VMADDR value */ +#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_2_VMADDR_MASK 0x0000FFFF +#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_2_VMADDR_SHIFT 0 +/* RX queue 3 VMADDR value */ +#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_3_VMADDR_MASK 0xFFFF0000 +#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_3_VMADDR_SHIFT 16 + +/**** cfg_vmpr_0 register ****/ +/* TX High Address Select Per Q */ +#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_HISEL_MASK 0x0000003F +#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_HISEL_SHIFT 0 +/* TX Data VMID Enable Per Q */ +#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_DATA_VMID_EN (1 << 7) +/* TX Prefetch VMID Enable Per Q */ +#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_PREF_VMID_EN (1 << 28) +/* TX Completions VMID Enable Per Q */ +#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_CMPL_VMID_EN (1 << 29) + +/**** cfg_vmpr_2 register ****/ +/* TX queue Prefetch VMID */ +#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_MASK 0x0000FFFF +#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_SHIFT 0 +/* TX queue Completion VMID */ +#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_MASK 0xFFFF0000 +#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_SHIFT 16 + +/**** cfg_vmpr_3 register ****/ +/* TX queue Data VMID */ +#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_MASK 0x0000FFFF +#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SHIFT 0 +/* TX queue Data VMID select */ +#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_MASK 0xFFFF0000 +#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_SHIFT 16 + +/**** cfg_vmpr_4 register ****/ +/* RX Data Buffer1 - High Address Select Per Q */ +#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_MASK 0x0000003F +#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_SHIFT 0 +/* RX Data Buffer1 VMID Enable Per Q */ +#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_VMID_EN (1 << 7) +/* RX Data Buffer2 - High Address Select Per Q */ +#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_MASK 0x00003F00 +#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_SHIFT 8 +/* RX Data Buffer2 VMID Enable Per Q */ +#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_VMID_EN (1 << 15) +/* RX Direct Data Placement - High Address Select Per Q */ +#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_MASK 0x003F0000 +#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_SHIFT 
16 +/* RX Direct Data Placement VMID Enable Per Q */ +#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_VMID_EN (1 << 23) +/* RX Buffer 2 MSB address word selects per bytes, per queue */ +#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_MASK 0x0F000000 +#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_SHIFT 24 +/* RX Prefetch VMID Enable Per Q */ +#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_PREF_VMID_EN (1 << 28) +/* RX Completions VMID Enable Per Q */ +#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_CMPL_VMID_EN (1 << 29) + +/**** cfg_vmpr_6 register ****/ +/* RX queue Prefetch VMID */ +#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_MASK 0x0000FFFF +#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_SHIFT 0 +/* RX queue Completion VMID */ +#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_MASK 0xFFFF0000 +#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_SHIFT 16 + +/**** cfg_vmpr_7 register ****/ +/* RX queue Data Buffer 1 VMID */ +#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_MASK 0x0000FFFF +#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SHIFT 0 +/* RX queue Data Buffer 1 VMID select */ +#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_MASK 0xFFFF0000 +#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_SHIFT 16 + +/**** cfg_vmpr_8 register ****/ +/* RX queue Data Buffer 2 VMID */ +#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_MASK 0x0000FFFF +#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SHIFT 0 +/* RX queue Data Buffer 2 VMID select */ +#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_MASK 0xFFFF0000 +#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_SHIFT 16 + +/**** cfg_vmpr_9 register ****/ +/* RX queue DDP VMID */ +#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_MASK 0x0000FFFF +#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SHIFT 0 +/* RX queue DDP VMID select */ +#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_MASK 0xFFFF0000 +#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_SHIFT 16 + +#ifdef __cplusplus +} +#endif + +#endif /* __AL_HAL_UDMA_GEN_REG_H */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_regs_m2s.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_regs_m2s.h new file mode 100644 index 00000000000000..dade280a962b57 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_regs_m2s.h @@ -0,0 +1,1158 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @file al_hal_udma_regs_m2s.h + * + * @brief C Header file for the UDMA M2S registers + * + */ + +#ifndef __AL_HAL_UDMA_M2S_REG_H +#define __AL_HAL_UDMA_M2S_REG_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif +/* +* Unit Registers +*/ + + + +struct udma_axi_m2s { + /* [0x0] Completion write master configuration */ + uint32_t comp_wr_cfg_1; + /* [0x4] Completion write master configuration */ + uint32_t comp_wr_cfg_2; + /* [0x8] Data read master configuration */ + uint32_t data_rd_cfg_1; + /* [0xc] Data read master configuration */ + uint32_t data_rd_cfg_2; + /* [0x10] Descriptor read master configuration */ + uint32_t desc_rd_cfg_1; + /* [0x14] Descriptor read master configuration */ + uint32_t desc_rd_cfg_2; + /* [0x18] Data read master configuration */ + uint32_t data_rd_cfg; + /* [0x1c] Descriptors read master configuration */ + uint32_t desc_rd_cfg_3; + /* [0x20] Descriptors write master configuration (completion) */ + uint32_t desc_wr_cfg_1; + /* [0x24] AXI outstanding configuration */ + uint32_t ostand_cfg; + uint32_t rsrvd[54]; +}; +struct udma_m2s { + /* + * [0x0] DMA state. + * 00 - No pending tasks + * 01 – Normal (active) + * 10 – Abort (error condition) + * 11 – Reserved + */ + uint32_t state; + /* [0x4] CPU request to change DMA state */ + uint32_t change_state; + uint32_t rsrvd_0; + /* + * [0xc] M2S DMA error log mask. + * Each error has an interrupt controller cause bit. + * This register determines if these errors cause the M2S DMA to log the + * error condition. + * 0 - Log is enabled. + * 1 - Log is masked. + */ + uint32_t err_log_mask; + uint32_t rsrvd_1; + /* + * [0x14] DMA header log. + * Sample the packet header that caused the error. + */ + uint32_t log_0; + /* + * [0x18] DMA header log. + * Sample the packet header that caused the error. + */ + uint32_t log_1; + /* + * [0x1c] DMA header log. + * Sample the packet header that caused the error. + */ + uint32_t log_2; + /* + * [0x20] DMA header log. + * Sample the packet header that caused the error. + */ + uint32_t log_3; + /* [0x24] DMA clear error log */ + uint32_t clear_err_log; + /* [0x28] M2S data FIFO status */ + uint32_t data_fifo_status; + /* [0x2c] M2S header FIFO status */ + uint32_t header_fifo_status; + /* [0x30] M2S unack FIFO status */ + uint32_t unack_fifo_status; + /* [0x34] Select queue for debug */ + uint32_t indirect_ctrl; + /* + * [0x38] M2S prefetch FIFO status. + * Status of the selected queue in M2S_indirect_ctrl + */ + uint32_t sel_pref_fifo_status; + /* + * [0x3c] M2S completion FIFO status. + * Status of the selected queue in M2S_indirect_ctrl + */ + uint32_t sel_comp_fifo_status; + /* + * [0x40] M2S rate limit status. 
+ * Status of the selected queue in M2S_indirect_ctrl + */ + uint32_t sel_rate_limit_status; + /* + * [0x44] M2S DWRR scheduler status. + * Status of the selected queue in M2S_indirect_ctrl + */ + uint32_t sel_dwrr_status; + /* [0x48] M2S state machine and FIFO clear control */ + uint32_t clear_ctrl; + /* [0x4c] Misc Check enable */ + uint32_t check_en; + /* [0x50] M2S FIFO enable control, internal */ + uint32_t fifo_en; + /* [0x54] M2S packet length configuration */ + uint32_t cfg_len; + /* [0x58] Stream interface configuration */ + uint32_t stream_cfg; + uint32_t rsrvd[41]; +}; +struct udma_m2s_rd { + /* [0x0] M2S descriptor prefetch configuration */ + uint32_t desc_pref_cfg_1; + /* [0x4] M2S descriptor prefetch configuration */ + uint32_t desc_pref_cfg_2; + /* [0x8] M2S descriptor prefetch configuration */ + uint32_t desc_pref_cfg_3; + uint32_t rsrvd_0; + /* [0x10] Data burst read configuration */ + uint32_t data_cfg; + uint32_t rsrvd[11]; +}; +struct udma_m2s_dwrr { + /* [0x0] Tx DMA DWRR scheduler configuration */ + uint32_t cfg_sched; + /* [0x4] Token bucket rate limit control */ + uint32_t ctrl_deficit_cnt; + uint32_t rsrvd[14]; +}; +struct udma_m2s_rate_limiter { + /* [0x0] Token bucket rate limit configuration */ + uint32_t gen_cfg; + /* + * [0x4] Token bucket rate limit control. + * Controls the cycle counters. + */ + uint32_t ctrl_cycle_cnt; + /* + * [0x8] Token bucket rate limit control. + * Controls the token bucket counter. + */ + uint32_t ctrl_token; + uint32_t rsrvd[13]; +}; + +struct udma_rlimit_common { + /* [0x0] Token bucket configuration */ + uint32_t cfg_1s; + /* [0x4] Token bucket rate limit configuration */ + uint32_t cfg_cycle; + /* [0x8] Token bucket rate limit configuration */ + uint32_t cfg_token_size_1; + /* [0xc] Token bucket rate limit configuration */ + uint32_t cfg_token_size_2; + /* [0x10] Token bucket rate limit configuration */ + uint32_t sw_ctrl; + /* + * [0x14] Mask the different types of rate limiter. + * 0 - Rate limit is active. + * 1 - Rate limit is masked. + */ + uint32_t mask; +}; + +struct udma_m2s_stream_rate_limiter { + struct udma_rlimit_common rlimit; + uint32_t rsrvd[10]; +}; +struct udma_m2s_comp { + /* [0x0] Completion controller configuration */ + uint32_t cfg_1c; + /* [0x4] Completion controller coalescing configuration */ + uint32_t cfg_coal; + /* [0x8] Completion controller application acknowledge configuration */ + uint32_t cfg_application_ack; + uint32_t rsrvd[61]; +}; +struct udma_m2s_stat { + /* [0x0] Statistics counters configuration */ + uint32_t cfg_st; + /* [0x4] Counting number of descriptors with First-bit set. */ + uint32_t tx_pkt; + /* + * [0x8] Counting the net length of the data buffers [64-bit] + * Should be read before tx_bytes_high + */ + uint32_t tx_bytes_low; + /* + * [0xc] Counting the net length of the data buffers [64-bit], + * Should be read after tx_bytes_low (value is sampled when reading + * Should be read before tx_bytes_low + */ + uint32_t tx_bytes_high; + /* [0x10] Total number of descriptors read from the host memory */ + uint32_t prefed_desc; + /* [0x14] Number of packets read from the unack FIFO */ + uint32_t comp_pkt; + /* [0x18] Number of descriptors written into the completion ring */ + uint32_t comp_desc; + /* + * [0x1c] Number of acknowledged packets. + * (acknowledge received from the stream interface) + */ + uint32_t ack_pkts; + uint32_t rsrvd[56]; +}; +struct udma_m2s_feature { + /* + * [0x0] M2S Feature register. 
+ * M2S instantiation parameters + */ + uint32_t reg_1; + /* [0x4] Reserved M2S feature register */ + uint32_t reg_2; + /* + * [0x8] M2S Feature register. + * M2S instantiation parameters + */ + uint32_t reg_3; + /* + * [0xc] M2S Feature register. + * M2S instantiation parameters + */ + uint32_t reg_4; + /* + * [0x10] M2S Feature register. + * M2S instantiation parameters + */ + uint32_t reg_5; + uint32_t rsrvd[59]; +}; +struct udma_m2s_q { + uint32_t rsrvd_0[8]; + /* [0x20] M2S descriptor ring configuration */ + uint32_t cfg; + /* [0x24] M2S descriptor ring status and information */ + uint32_t status; + /* [0x28] TX Descriptor Ring Base Pointer [31:4] */ + uint32_t tdrbp_low; + /* [0x2c] TX Descriptor Ring Base Pointer [63:32] */ + uint32_t tdrbp_high; + /* + * [0x30] TX Descriptor Ring Length[23:2] + */ + uint32_t tdrl; + /* [0x34] TX Descriptor Ring Head Pointer */ + uint32_t tdrhp; + /* [0x38] Tx Descriptor Tail Pointer increment */ + uint32_t tdrtp_inc; + /* [0x3c] Tx Descriptor Tail Pointer */ + uint32_t tdrtp; + /* [0x40] TX Descriptor Current Pointer */ + uint32_t tdcp; + /* [0x44] Tx Completion Ring Base Pointer [31:4] */ + uint32_t tcrbp_low; + /* [0x48] TX Completion Ring Base Pointer [63:32] */ + uint32_t tcrbp_high; + /* [0x4c] TX Completion Ring Head Pointer */ + uint32_t tcrhp; + /* + * [0x50] Tx Completion Ring Head Pointer internal (Before the + * coalescing FIFO) + */ + uint32_t tcrhp_internal; + uint32_t rsrvd_1[3]; + /* [0x60] Rate limit configuration */ + struct udma_rlimit_common rlimit; + uint32_t rsrvd_2[2]; + /* [0x80] DWRR scheduler configuration */ + uint32_t dwrr_cfg_1; + /* [0x84] DWRR scheduler configuration */ + uint32_t dwrr_cfg_2; + /* [0x88] DWRR scheduler configuration */ + uint32_t dwrr_cfg_3; + /* [0x8c] DWRR scheduler software control */ + uint32_t dwrr_sw_ctrl; + uint32_t rsrvd_3[4]; + /* [0xa0] Completion controller configuration */ + uint32_t comp_cfg; + uint32_t rsrvd_4[3]; + /* [0xb0] SW control */ + uint32_t q_sw_ctrl; + uint32_t rsrvd_5[3]; + /* [0xc0] Number of M2S Tx packets after the scheduler */ + uint32_t q_tx_pkt; + uint32_t rsrvd[975]; +}; + +struct udma_m2s_regs { + uint32_t rsrvd_0[64]; + struct udma_axi_m2s axi_m2s; /* [0x100] */ + struct udma_m2s m2s; /* [0x200] */ + struct udma_m2s_rd m2s_rd; /* [0x300] */ + struct udma_m2s_dwrr m2s_dwrr; /* [0x340] */ + struct udma_m2s_rate_limiter m2s_rate_limiter; /* [0x380] */ + struct udma_m2s_stream_rate_limiter m2s_stream_rate_limiter; /* [0x3c0] */ + struct udma_m2s_comp m2s_comp; /* [0x400] */ + struct udma_m2s_stat m2s_stat; /* [0x500] */ + struct udma_m2s_feature m2s_feature; /* [0x600] */ + uint32_t rsrvd_1[576]; + struct udma_m2s_q m2s_q[4]; /* [0x1000] */ +}; + + +/* +* Registers Fields +*/ + + +/**** comp_wr_cfg_1 register ****/ +/* AXI write ID (AWID) */ +#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWID_MASK 0x000000FF +#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWID_SHIFT 0 +/* Cache Type */ +#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_MASK 0x000F0000 +#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_SHIFT 16 +/* Burst type */ +#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_MASK 0x03000000 +#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_SHIFT 24 + +/**** comp_wr_cfg_2 register ****/ +/* User extension */ +#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWUSER_MASK 0x000FFFFF +#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWUSER_SHIFT 0 +/* Bus size, 128-bit */ +#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_MASK 0x00700000 +#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_SHIFT 20 +/* + * AXI Master QoS. 
+ * Used for arbitration between AXI masters + */ +#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_MASK 0x07000000 +#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_SHIFT 24 +/* Protection Type */ +#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_MASK 0x70000000 +#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_SHIFT 28 + +/**** data_rd_cfg_1 register ****/ +/* AXI read ID (ARID) */ +#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARID_MASK 0x000000FF +#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARID_SHIFT 0 +/* Cache Type */ +#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARCACHE_MASK 0x000F0000 +#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARCACHE_SHIFT 16 +/* Burst type */ +#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARBURST_MASK 0x03000000 +#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARBURST_SHIFT 24 + +/**** data_rd_cfg_2 register ****/ +/* User extension */ +#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARUSER_MASK 0x000FFFFF +#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARUSER_SHIFT 0 +/* Bus size, 128-bit */ +#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARSIZE_MASK 0x00700000 +#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARSIZE_SHIFT 20 +/* + * AXI Master QoS. + * Used for arbitration between AXI masters + */ +#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARQOS_MASK 0x07000000 +#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARQOS_SHIFT 24 +/* Protection Type */ +#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARPROT_MASK 0x70000000 +#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARPROT_SHIFT 28 + +/**** desc_rd_cfg_1 register ****/ +/* AXI read ID (ARID) */ +#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARID_MASK 0x000000FF +#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARID_SHIFT 0 +/* Cache Type */ +#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARCACHE_MASK 0x000F0000 +#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARCACHE_SHIFT 16 +/* Burst type */ +#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARBURST_MASK 0x03000000 +#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARBURST_SHIFT 24 + +/**** desc_rd_cfg_2 register ****/ +/* User extension */ +#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARUSER_MASK 0x000FFFFF +#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARUSER_SHIFT 0 +/* Bus size, 128-bit */ +#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARSIZE_MASK 0x00700000 +#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARSIZE_SHIFT 20 +/* + * AXI Master QoS + * Used for arbitration between AXI masters + */ +#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARQOS_MASK 0x07000000 +#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARQOS_SHIFT 24 +/* Protection Type */ +#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARPROT_MASK 0x70000000 +#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARPROT_SHIFT 28 + +/**** data_rd_cfg register ****/ +/* + * Defines the maximum number of AXI beats for a single AXI burst. + * This value is used for a burst split decision. + */ +#define UDMA_AXI_M2S_DATA_RD_CFG_MAX_AXI_BEATS_MASK 0x000000FF +#define UDMA_AXI_M2S_DATA_RD_CFG_MAX_AXI_BEATS_SHIFT 0 +/* + * Enable breaking data read request. + * Aligned to max_AXI_beats when the total read size is less than max_AXI_beats + */ +#define UDMA_AXI_M2S_DATA_RD_CFG_ALWAYS_BREAK_ON_MAX_BOUDRY (1 << 16) + +/**** desc_rd_cfg_3 register ****/ +/* + * Defines the maximum number of AXI beats for a single AXI burst. + * This value is used for a burst split decision. + * Maximum burst size for reading data( in AXI beats, 128-bits) + * (default – 16 beats, 256 bytes) + */ +#define UDMA_AXI_M2S_DESC_RD_CFG_3_MAX_AXI_BEATS_MASK 0x000000FF +#define UDMA_AXI_M2S_DESC_RD_CFG_3_MAX_AXI_BEATS_SHIFT 0 +/* + * Enable breaking descriptor read request. + * Aligned to max_AXI_beats when the total read size is less than max_AXI_beats. 
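+ *
+ * Illustrative field-update sketch (assumes the HAL's al_reg_read32()/
+ * al_reg_write32() accessors and a pointer 'regs' of type
+ * struct udma_m2s_regs __iomem *; the value 16 is an example only):
+ *
+ *	uint32_t v = al_reg_read32(&regs->axi_m2s.desc_rd_cfg_3);
+ *	v &= ~UDMA_AXI_M2S_DESC_RD_CFG_3_MAX_AXI_BEATS_MASK;
+ *	v |= 16 << UDMA_AXI_M2S_DESC_RD_CFG_3_MAX_AXI_BEATS_SHIFT;
+ *	al_reg_write32(&regs->axi_m2s.desc_rd_cfg_3, v);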
+ */ +#define UDMA_AXI_M2S_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY (1 << 16) + +/**** desc_wr_cfg_1 register ****/ +/* + * Defines the maximum number of AXI beats for a single AXI burst. + * This value is used for a burst split decision. + */ +#define UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK 0x000000FF +#define UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT 0 +/* + * Minimum burst for writing completion descriptors. + * Defined in AXI beats + * 4 Descriptors per beat. + * Value must be aligned to cache lines (64 bytes). + * Default value is 2 cache lines, 32 descriptors, 8 beats. + */ +#define UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK 0x00FF0000 +#define UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT 16 + +/**** ostand_cfg register ****/ +/* Maximum number of outstanding data reads to the AXI (AXI transactions) */ +#define UDMA_AXI_M2S_OSTAND_CFG_MAX_DATA_RD_MASK 0x0000003F +#define UDMA_AXI_M2S_OSTAND_CFG_MAX_DATA_RD_SHIFT 0 +/* + * Maximum number of outstanding descriptor reads to the AXI (AXI transactions) + */ +#define UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_MASK 0x00003F00 +#define UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_SHIFT 8 +/* + * Maximum number of outstanding descriptor writes to the AXI (AXI transactions) + */ +#define UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_MASK 0x003F0000 +#define UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_SHIFT 16 +/* + * Maximum number of outstanding data beats for descriptor write to AXI (AXI + * beats) + */ +#define UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_MASK 0xFF000000 +#define UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_SHIFT 24 + +/**** state register ****/ +/* Completion control */ +#define UDMA_M2S_STATE_COMP_CTRL_MASK 0x00000003 +#define UDMA_M2S_STATE_COMP_CTRL_SHIFT 0 +/* Stream interface */ +#define UDMA_M2S_STATE_STREAM_IF_MASK 0x00000030 +#define UDMA_M2S_STATE_STREAM_IF_SHIFT 4 +/* Data read control */ +#define UDMA_M2S_STATE_DATA_RD_CTRL_MASK 0x00000300 +#define UDMA_M2S_STATE_DATA_RD_CTRL_SHIFT 8 +/* Descriptor prefetch */ +#define UDMA_M2S_STATE_DESC_PREF_MASK 0x00003000 +#define UDMA_M2S_STATE_DESC_PREF_SHIFT 12 + +/**** change_state register ****/ +/* Start normal operation */ +#define UDMA_M2S_CHANGE_STATE_NORMAL (1 << 0) +/* Stop normal operation */ +#define UDMA_M2S_CHANGE_STATE_DIS (1 << 1) +/* + * Stop all machines. + * (Prefetch, scheduling, completion and stream interface) + */ +#define UDMA_M2S_CHANGE_STATE_ABORT (1 << 2) + +/**** err_log_mask register ****/ +/* + * Mismatch of packet serial number. 
+ * (between first packet in the unacknowledged FIFO and received ack from the + * stream) + */ +#define UDMA_M2S_ERR_LOG_MASK_COMP_PKT_MISMATCH (1 << 0) +/* Parity error */ +#define UDMA_M2S_ERR_LOG_MASK_STREAM_AXI_PARITY (1 << 1) +/* AXI response error */ +#define UDMA_M2S_ERR_LOG_MASK_STREAM_AXI_RESPONSE (1 << 2) +/* AXI timeout (ack not received) */ +#define UDMA_M2S_ERR_LOG_MASK_STREAM_AXI_TOUT (1 << 3) +/* Parity error */ +#define UDMA_M2S_ERR_LOG_MASK_COMP_AXI_PARITY (1 << 4) +/* AXI response error */ +#define UDMA_M2S_ERR_LOG_MASK_COMP_AXI_RESPONSE (1 << 5) +/* AXI timeout */ +#define UDMA_M2S_ERR_LOG_MASK_COMP_AXI_TOUT (1 << 6) +/* Parity error */ +#define UDMA_M2S_ERR_LOG_MASK_DATA_AXI_PARITY (1 << 7) +/* AXI response error */ +#define UDMA_M2S_ERR_LOG_MASK_DATA_AXI_RESPONSE (1 << 8) +/* AXI timeout */ +#define UDMA_M2S_ERR_LOG_MASK_DATA_AXI_TOUT (1 << 9) +/* Parity error */ +#define UDMA_M2S_ERR_LOG_MASK_PREF_AXI_PARITY (1 << 10) +/* AXI response error */ +#define UDMA_M2S_ERR_LOG_MASK_PREF_AXI_RESPONSE (1 << 11) +/* AXI timeout */ +#define UDMA_M2S_ERR_LOG_MASK_PREF_AXI_TOUT (1 << 12) +/* Packet length error */ +#define UDMA_M2S_ERR_LOG_MASK_PREF_PKT_LEN_OVERFLOW (1 << 13) +/* Maximum number of descriptors per packet error */ +#define UDMA_M2S_ERR_LOG_MASK_PREF_MAX_DESC_CNT (1 << 14) +/* Error in first bit indication of the descriptor */ +#define UDMA_M2S_ERR_LOG_MASK_PREF_FIRST (1 << 15) +/* Error in last bit indication of the descriptor */ +#define UDMA_M2S_ERR_LOG_MASK_PREF_LAST (1 << 16) +/* Ring_ID error */ +#define UDMA_M2S_ERR_LOG_MASK_PREF_RING_ID (1 << 17) +/* Data buffer parity error */ +#define UDMA_M2S_ERR_LOG_MASK_DATA_BUFF_PARITY (1 << 18) +/* Internal error */ +#define UDMA_M2S_ERR_LOG_MASK_INTERNAL_MASK 0xFFF80000 +#define UDMA_M2S_ERR_LOG_MASK_INTERNAL_SHIFT 19 + +/**** clear_err_log register ****/ +/* Clear error log */ +#define UDMA_M2S_CLEAR_ERR_LOG_CLEAR (1 << 0) + +/**** data_fifo_status register ****/ +/* FIFO used indication */ +#define UDMA_M2S_DATA_FIFO_STATUS_USED_MASK 0x0000FFFF +#define UDMA_M2S_DATA_FIFO_STATUS_USED_SHIFT 0 +/* FIFO empty indication */ +#define UDMA_M2S_DATA_FIFO_STATUS_EMPTY (1 << 24) +/* FIFO full indication */ +#define UDMA_M2S_DATA_FIFO_STATUS_FULL (1 << 28) + +/**** header_fifo_status register ****/ +/* FIFO used indication */ +#define UDMA_M2S_HEADER_FIFO_STATUS_USED_MASK 0x0000FFFF +#define UDMA_M2S_HEADER_FIFO_STATUS_USED_SHIFT 0 +/* FIFO empty indication */ +#define UDMA_M2S_HEADER_FIFO_STATUS_EMPTY (1 << 24) +/* FIFO full indication */ +#define UDMA_M2S_HEADER_FIFO_STATUS_FULL (1 << 28) + +/**** unack_fifo_status register ****/ +/* FIFO used indication */ +#define UDMA_M2S_UNACK_FIFO_STATUS_USED_MASK 0x0000FFFF +#define UDMA_M2S_UNACK_FIFO_STATUS_USED_SHIFT 0 +/* FIFO empty indication */ +#define UDMA_M2S_UNACK_FIFO_STATUS_EMPTY (1 << 24) +/* FIFO full indication */ +#define UDMA_M2S_UNACK_FIFO_STATUS_FULL (1 << 28) + +/**** indirect_ctrl register ****/ +/* Selected queue for status read */ +#define UDMA_M2S_INDIRECT_CTRL_Q_NUM_MASK 0x00000FFF +#define UDMA_M2S_INDIRECT_CTRL_Q_NUM_SHIFT 0 + +/**** sel_pref_fifo_status register ****/ +/* FIFO used indication */ +#define UDMA_M2S_SEL_PREF_FIFO_STATUS_USED_MASK 0x0000FFFF +#define UDMA_M2S_SEL_PREF_FIFO_STATUS_USED_SHIFT 0 +/* FIFO empty indication */ +#define UDMA_M2S_SEL_PREF_FIFO_STATUS_EMPTY (1 << 24) +/* FIFO full indication */ +#define UDMA_M2S_SEL_PREF_FIFO_STATUS_FULL (1 << 28) + +/**** sel_comp_fifo_status register ****/ +/* FIFO used indication */ +#define 
UDMA_M2S_SEL_COMP_FIFO_STATUS_USED_MASK 0x0000FFFF +#define UDMA_M2S_SEL_COMP_FIFO_STATUS_USED_SHIFT 0 +/* FIFO empty indication */ +#define UDMA_M2S_SEL_COMP_FIFO_STATUS_EMPTY (1 << 24) +/* FIFO full indication */ +#define UDMA_M2S_SEL_COMP_FIFO_STATUS_FULL (1 << 28) + +/**** sel_rate_limit_status register ****/ +/* Token counter */ +#define UDMA_M2S_SEL_RATE_LIMIT_STATUS_TOKEN_CNT_MASK 0x00FFFFFF +#define UDMA_M2S_SEL_RATE_LIMIT_STATUS_TOKEN_CNT_SHIFT 0 + +/**** sel_dwrr_status register ****/ +/* Deficit counter */ +#define UDMA_M2S_SEL_DWRR_STATUS_DEFICIT_CNT_MASK 0x00FFFFFF +#define UDMA_M2S_SEL_DWRR_STATUS_DEFICIT_CNT_SHIFT 0 + +/**** cfg_len register ****/ +/* Maximum packet size for the M2S */ +#define UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_MASK 0x000FFFFF +#define UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_SHIFT 0 +/* + * Length encoding for 64K. + * 0 - length 0x0000 = 0 + * 1 - length 0x0000 = 64k + */ +#define UDMA_M2S_CFG_LEN_ENCODE_64K (1 << 24) + +/**** stream_cfg register ****/ +/* + * Disables the stream interface operation. + * Changing to 1 stops at the end of packet transmission. + */ +#define UDMA_M2S_STREAM_CFG_DISABLE (1 << 0) +/* + * Configuration of the stream FIFO read control. + * 0 - Cut through + * 1 - Threshold based + */ +#define UDMA_M2S_STREAM_CFG_RD_MODE (1 << 1) +/* Minimum number of beats to start packet transmission. */ +#define UDMA_M2S_STREAM_CFG_RD_TH_MASK 0x0003FF00 +#define UDMA_M2S_STREAM_CFG_RD_TH_SHIFT 8 + +/**** desc_pref_cfg_1 register ****/ +/* Size of the descriptor prefetch FIFO (in descriptors) */ +#define UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK 0x000000FF +#define UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_SHIFT 0 + +/**** desc_pref_cfg_2 register ****/ +/* Maximum number of descriptors per packet */ +#define UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK 0x0000001F +#define UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_SHIFT 0 +/* + * Force RR arbitration in the prefetch arbiter. + * 0 -Standard arbitration based on queue QoS + * 1 - Force Round Robin arbitration + */ +#define UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR (1 << 16) + +/**** desc_pref_cfg_3 register ****/ +/* + * Minimum descriptor burst size when prefetch FIFO level is below the + * descriptor prefetch threshold + * (must be 1) + */ +#define UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK 0x0000000F +#define UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_SHIFT 0 +/* + * Minimum descriptor burst size when prefetch FIFO level is above the + * descriptor prefetch threshold + */ +#define UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK 0x000000F0 +#define UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT 4 +/* + * Descriptor fetch threshold. + * Used as a threshold to determine the allowed minimum descriptor burst size. + * (Must be at least max_desc_per_pkt) + */ +#define UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK 0x0000FF00 +#define UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT 8 + +/**** data_cfg register ****/ +/* + * Maximum number of data beats in the data read FIFO. + * Defined based on data FIFO size + * (default FIFO size 2KB → 128 beats) + */ +#define UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK 0x000003FF +#define UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_SHIFT 0 +/* + * Maximum number of packets in the data read FIFO. + * Defined based on header FIFO size + */ +#define UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_MASK 0x00FF0000 +#define UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_SHIFT 16 + +/**** cfg_sched register ****/ +/* + * Enable the DWRR scheduler. 
+ * If this bit is 0, queues with same QoS will be served with RR scheduler. + */ +#define UDMA_M2S_DWRR_CFG_SCHED_EN_DWRR (1 << 0) +/* + * Scheduler operation mode. + * 0 - Byte mode + * 1 - Packet mode + */ +#define UDMA_M2S_DWRR_CFG_SCHED_PKT_MODE_EN (1 << 4) +/* + * Enable incrementing the weight factor between DWRR iterations. + * 00 - Don't increase the increment factor. + * 01 - Increment once + * 10 - Increment exponential + * 11 - Reserved + */ +#define UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_MASK 0x00000300 +#define UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_SHIFT 8 +/* + * Increment factor power of 2. + * 7 --> 128 bytes + * This is the factor used to multiply the weight. + */ +#define UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_MASK 0x000F0000 +#define UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_SHIFT 16 + +/**** ctrl_deficit_cnt register ****/ +/* + * Init value for the deficit counter. + * Initializes the deficit counters of all queues to this value any time this + * register is written. + */ +#define UDMA_M2S_DWRR_CTRL_DEFICIT_CNT_INIT_MASK 0x00FFFFFF +#define UDMA_M2S_DWRR_CTRL_DEFICIT_CNT_INIT_SHIFT 0 + +/**** gen_cfg register ****/ +/* Size of the basic token fill cycle, system clock cycles */ +#define UDMA_M2S_RATE_LIMITER_GEN_CFG_SHORT_CYCLE_SIZE_MASK 0x0000FFFF +#define UDMA_M2S_RATE_LIMITER_GEN_CFG_SHORT_CYCLE_SIZE_SHIFT 0 +/* + * Rate limiter operation mode. + * 0 - Byte mode + * 1 - Packet mode + */ +#define UDMA_M2S_RATE_LIMITER_GEN_CFG_PKT_MODE_EN (1 << 24) + +/**** ctrl_cycle_cnt register ****/ +/* Reset the short and long cycle counters. */ +#define UDMA_M2S_RATE_LIMITER_CTRL_CYCLE_CNT_RST (1 << 0) + +/**** ctrl_token register ****/ +/* + * Init value for the token counter. + * Initializes the token counters of all queues to this value any time this + * register is written. + */ +#define UDMA_M2S_RATE_LIMITER_CTRL_TOKEN_RST_MASK 0x00FFFFFF +#define UDMA_M2S_RATE_LIMITER_CTRL_TOKEN_RST_SHIFT 0 + +/**** cfg_1s register ****/ +/* Maximum number of accumulated bytes in the token counter */ +#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_MAX_BURST_SIZE_MASK 0x00FFFFFF +#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_MAX_BURST_SIZE_SHIFT 0 +/* Enable the rate limiter. */ +#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_EN (1 << 24) +/* Stop token fill. */ +#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_PAUSE (1 << 25) + +/**** cfg_cycle register ****/ +/* Number of short cycles between token fills */ +#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_CYCLE_LONG_CYCLE_SIZE_MASK 0x0000FFFF +#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_CYCLE_LONG_CYCLE_SIZE_SHIFT 0 + +/**** cfg_token_size_1 register ****/ +/* Number of bits to add in each long cycle */ +#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_1_LONG_CYCLE_MASK 0x0007FFFF +#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_1_LONG_CYCLE_SHIFT 0 + +/**** cfg_token_size_2 register ****/ +/* Number of bits to add in each short cycle */ +#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_2_SHORT_CYCLE_MASK 0x0007FFFF +#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_2_SHORT_CYCLE_SHIFT 0 + +/**** sw_ctrl register ****/ +/* Reset the token bucket counter. */ +#define UDMA_M2S_STREAM_RATE_LIMITER_SW_CTRL_RST_TOKEN_CNT (1 << 0) + +/**** mask register ****/ +/* Mask the external rate limiter. */ +#define UDMA_M2S_STREAM_RATE_LIMITER_MASK_EXTERNAL_RATE_LIMITER (1 << 0) +/* Mask the internal rate limiter. */ +#define UDMA_M2S_STREAM_RATE_LIMITER_MASK_INTERNAL_RATE_LIMITER (1 << 1) +/* Mask the external application pause interface. 
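+ *
+ * Illustrative sketch (assuming "regs" is a mapped struct udma_m2s_regs
+ * pointer and that the mask register is the "mask" member of
+ * m2s_stream_rate_limiter) of masking every throttling source so the
+ * stream interface is never paused:
+ *
+ *	writel(UDMA_M2S_STREAM_RATE_LIMITER_MASK_EXTERNAL_RATE_LIMITER |
+ *	       UDMA_M2S_STREAM_RATE_LIMITER_MASK_INTERNAL_RATE_LIMITER |
+ *	       UDMA_M2S_STREAM_RATE_LIMITER_MASK_EXTERNAL_PAUSE,
+ *	       &regs->m2s_stream_rate_limiter.mask);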
*/ +#define UDMA_M2S_STREAM_RATE_LIMITER_MASK_EXTERNAL_PAUSE (1 << 3) + +/**** cfg_1c register ****/ +/* + * Completion FIFO size + * (descriptors per queue) + */ +#define UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_MASK 0x000000FF +#define UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_SHIFT 0 +/* + * Unacknowledged FIFO size. + * (descriptors) + */ +#define UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_MASK 0x0001FF00 +#define UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_SHIFT 8 +/* + * Enable promotion. + * Enable the promotion of the current queue in progress for the completion + * write scheduler. + */ +#define UDMA_M2S_COMP_CFG_1C_Q_PROMOTION (1 << 24) +/* Force RR arbitration in the completion arbiter */ +#define UDMA_M2S_COMP_CFG_1C_FORCE_RR (1 << 25) +/* Minimum number of free completion entries to qualify for promotion */ +#define UDMA_M2S_COMP_CFG_1C_Q_FREE_MIN_MASK 0xF0000000 +#define UDMA_M2S_COMP_CFG_1C_Q_FREE_MIN_SHIFT 28 + +/**** cfg_application_ack register ****/ +/* + * Acknowledge timeout timer. + * ACK from the application through the stream interface) + */ +#define UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_MASK 0x00FFFFFF +#define UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_SHIFT 0 + +/**** cfg_st register ****/ +/* Use additional length value for all statistics counters. */ +#define UDMA_M2S_STAT_CFG_ST_USE_EXTRA_LEN (1 << 0) + +/**** reg_1 register ****/ +/* + * Read the size of the descriptor prefetch FIFO + * (descriptors). + */ +#define UDMA_M2S_FEATURE_REG_1_DESC_PREFERCH_FIFO_DEPTH_MASK 0x000000FF +#define UDMA_M2S_FEATURE_REG_1_DESC_PREFERCH_FIFO_DEPTH_SHIFT 0 + +/**** reg_3 register ****/ +/* + * Maximum number of data beats in the data read FIFO. + * Defined based on data FIFO size + * (default FIFO size 2KB → 128 beats) + */ +#define UDMA_M2S_FEATURE_REG_3_DATA_FIFO_DEPTH_MASK 0x000003FF +#define UDMA_M2S_FEATURE_REG_3_DATA_FIFO_DEPTH_SHIFT 0 +/* + * Maximum number of packets in the data read FIFO. + * Defined based on header FIFO size + */ +#define UDMA_M2S_FEATURE_REG_3_DATA_RD_MAX_PKT_LIMIT_MASK 0x00FF0000 +#define UDMA_M2S_FEATURE_REG_3_DATA_RD_MAX_PKT_LIMIT_SHIFT 16 + +/**** reg_4 register ****/ +/* + * Size of the completion FIFO of each queue + * (words) + */ +#define UDMA_M2S_FEATURE_REG_4_COMP_FIFO_DEPTH_MASK 0x000000FF +#define UDMA_M2S_FEATURE_REG_4_COMP_FIFO_DEPTH_SHIFT 0 +/* Size of the unacknowledged FIFO (descriptors) */ +#define UDMA_M2S_FEATURE_REG_4_COMP_UNACK_FIFO_DEPTH_MASK 0x0001FF00 +#define UDMA_M2S_FEATURE_REG_4_COMP_UNACK_FIFO_DEPTH_SHIFT 8 + +/**** reg_5 register ****/ +/* Maximum number of outstanding data reads to AXI */ +#define UDMA_M2S_FEATURE_REG_5_MAX_DATA_RD_OSTAND_MASK 0x0000003F +#define UDMA_M2S_FEATURE_REG_5_MAX_DATA_RD_OSTAND_SHIFT 0 +/* Maximum number of outstanding descriptor reads to AXI */ +#define UDMA_M2S_FEATURE_REG_5_MAX_DESC_RD_OSTAND_MASK 0x00003F00 +#define UDMA_M2S_FEATURE_REG_5_MAX_DESC_RD_OSTAND_SHIFT 8 +/* + * Maximum number of outstanding descriptor writes to AXI. + * (AXI transactions) + */ +#define UDMA_M2S_FEATURE_REG_5_MAX_COMP_REQ_MASK 0x003F0000 +#define UDMA_M2S_FEATURE_REG_5_MAX_COMP_REQ_SHIFT 16 +/* + * Maximum number of outstanding data beats for descriptor write to AXI. + * (AXI beats) + */ +#define UDMA_M2S_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_MASK 0xFF000000 +#define UDMA_M2S_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_SHIFT 24 + +/**** cfg register ****/ +/* + * Length offset to be used for each packet from this queue. + * (length offset is used for the scheduler and rate limiter). 
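+ *
+ * A hedged enable sequence for this same cfg register (the "cfg" member
+ * of struct udma_m2s_q is defined above; "q", a pointer to one queue,
+ * is assumed):
+ *
+ *	uint32_t v = readl(&q->cfg);
+ *	v |= UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING;
+ *	writel(v, &q->cfg);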
+ */ +#define UDMA_M2S_Q_CFG_PKT_LEN_OFFSET_MASK 0x0000FFFF +#define UDMA_M2S_Q_CFG_PKT_LEN_OFFSET_SHIFT 0 +/* + * Enable operation of this queue. + * Start prefetch. + */ +#define UDMA_M2S_Q_CFG_EN_PREF (1 << 16) +/* + * Enable operation of this queue. + * Start scheduling. + */ +#define UDMA_M2S_Q_CFG_EN_SCHEDULING (1 << 17) +/* Allow prefetch of less than minimum prefetch burst size. */ +#define UDMA_M2S_Q_CFG_ALLOW_LT_MIN_PREF (1 << 20) +/* Configure the AXI AWCACHE for completion write. */ +#define UDMA_M2S_Q_CFG_AXI_AWCACHE_COMP_MASK 0x0F000000 +#define UDMA_M2S_Q_CFG_AXI_AWCACHE_COMP_SHIFT 24 +/* + * AXI QoS for the selected queue. + * This value is used in AXI transactions associated with this queue and the + * prefetch and completion arbiters. + */ +#define UDMA_M2S_Q_CFG_AXI_QOS_MASK 0x70000000 +#define UDMA_M2S_Q_CFG_AXI_QOS_SHIFT 28 + +/**** status register ****/ +/* Indicates how many entries are used in the queue */ +#define UDMA_M2S_Q_STATUS_Q_USED_MASK 0x01FFFFFF +#define UDMA_M2S_Q_STATUS_Q_USED_SHIFT 0 +/* + * prefetch status + * 0 – prefetch operation is stopped + * 1 – prefetch is operational + */ +#define UDMA_M2S_Q_STATUS_PREFETCH (1 << 28) +/* + * Queue scheduler status + * 0 – queue is not active and not participating in scheduling + * 1 – queue is active and participating in the scheduling process + */ +#define UDMA_M2S_Q_STATUS_SCHEDULER (1 << 29) +/* Queue is suspended due to DMB */ +#define UDMA_M2S_Q_STATUS_Q_DMB (1 << 30) +/* + * Queue full indication. + * (used by the host when head pointer equals tail pointer). + */ +#define UDMA_M2S_Q_STATUS_Q_FULL (1 << 31) +/* + * M2S Descriptor Ring Base address [31:4]. + * Value of the base address of the M2S descriptor ring + * [3:0] - 0 - 16B alignment is enforced + * ([11:4] should be 0 for 4KB alignment) + */ +#define UDMA_M2S_Q_TDRBP_LOW_ADDR_MASK 0xFFFFFFF0 +#define UDMA_M2S_Q_TDRBP_LOW_ADDR_SHIFT 4 + +/**** TDRL register ****/ +/* + * Length of the descriptor ring. + * (descriptors) + * Associated with the ring base address, ends at maximum burst size alignment. + */ +#define UDMA_M2S_Q_TDRL_OFFSET_MASK 0x00FFFFFF +#define UDMA_M2S_Q_TDRL_OFFSET_SHIFT 0 + +/**** TDRHP register ****/ +/* + * Relative offset of the next descriptor that needs to be read into the + * prefetch FIFO. + * Incremented when the DMA reads valid descriptors from the host memory to the + * prefetch FIFO. + * Note that this is the offset in # of descriptors and not in byte address. + */ +#define UDMA_M2S_Q_TDRHP_OFFSET_MASK 0x00FFFFFF +#define UDMA_M2S_Q_TDRHP_OFFSET_SHIFT 0 +/* Ring ID */ +#define UDMA_M2S_Q_TDRHP_RING_ID_MASK 0xC0000000 +#define UDMA_M2S_Q_TDRHP_RING_ID_SHIFT 30 + +/**** TDRTP_inc register ****/ +/* Increments the value in Q_TDRTP (descriptors) */ +#define UDMA_M2S_Q_TDRTP_INC_VAL_MASK 0x00FFFFFF +#define UDMA_M2S_Q_TDRTP_INC_VAL_SHIFT 0 + +/**** TDRTP register ****/ +/* + * Relative offset of the next free descriptor in the host memory. + * Note that this is the offset in # of descriptors and not in byte address. + */ +#define UDMA_M2S_Q_TDRTP_OFFSET_MASK 0x00FFFFFF +#define UDMA_M2S_Q_TDRTP_OFFSET_SHIFT 0 +/* Ring ID */ +#define UDMA_M2S_Q_TDRTP_RING_ID_MASK 0xC0000000 +#define UDMA_M2S_Q_TDRTP_RING_ID_SHIFT 30 + +/**** TDCP register ****/ +/* + * Relative offset of the first descriptor in the prefetch FIFO. + * This is the next descriptor that will be read by the scheduler. 
+ */ +#define UDMA_M2S_Q_TDCP_OFFSET_MASK 0x00FFFFFF +#define UDMA_M2S_Q_TDCP_OFFSET_SHIFT 0 +/* Ring ID */ +#define UDMA_M2S_Q_TDCP_RING_ID_MASK 0xC0000000 +#define UDMA_M2S_Q_TDCP_RING_ID_SHIFT 30 +/* + * M2S Descriptor Ring Base address [31:4]. + * Value of the base address of the M2S descriptor ring + * [3:0] - 0 - 16B alignment is enforced + * ([11:4] should be 0 for 4KB alignment) + * NOTE: + * Length of the descriptor ring (in descriptors) associated with the ring base + * address. Ends at maximum burst size alignment. + */ +#define UDMA_M2S_Q_TCRBP_LOW_ADDR_MASK 0xFFFFFFF0 +#define UDMA_M2S_Q_TCRBP_LOW_ADDR_SHIFT 4 + +/**** TCRHP register ****/ +/* + * Relative offset of the next descriptor that needs to be updated by the + * completion controller. + * Note: This is in descriptors and not in byte address. + */ +#define UDMA_M2S_Q_TCRHP_OFFSET_MASK 0x00FFFFFF +#define UDMA_M2S_Q_TCRHP_OFFSET_SHIFT 0 +/* Ring ID */ +#define UDMA_M2S_Q_TCRHP_RING_ID_MASK 0xC0000000 +#define UDMA_M2S_Q_TCRHP_RING_ID_SHIFT 30 + +/**** TCRHP_internal register ****/ +/* + * Relative offset of the next descriptor that needs to be updated by the + * completion controller. + * Note: This is in descriptors and not in byte address. + */ +#define UDMA_M2S_Q_TCRHP_INTERNAL_OFFSET_MASK 0x00FFFFFF +#define UDMA_M2S_Q_TCRHP_INTERNAL_OFFSET_SHIFT 0 +/* Ring ID */ +#define UDMA_M2S_Q_TCRHP_INTERNAL_RING_ID_MASK 0xC0000000 +#define UDMA_M2S_Q_TCRHP_INTERNAL_RING_ID_SHIFT 30 + +/**** rate_limit_cfg_1 register ****/ +/* Maximum number of accumulated bytes in the token counter. */ +#define UDMA_M2S_Q_RATE_LIMIT_CFG_1_MAX_BURST_SIZE_MASK 0x00FFFFFF +#define UDMA_M2S_Q_RATE_LIMIT_CFG_1_MAX_BURST_SIZE_SHIFT 0 +/* Enable the rate limiter. */ +#define UDMA_M2S_Q_RATE_LIMIT_CFG_1_EN (1 << 24) +/* Stop token fill. */ +#define UDMA_M2S_Q_RATE_LIMIT_CFG_1_PAUSE (1 << 25) + +/**** rate_limit_cfg_cycle register ****/ +/* Number of short cycles between token fills */ +#define UDMA_M2S_Q_RATE_LIMIT_CFG_CYCLE_LONG_CYCLE_SIZE_MASK 0x0000FFFF +#define UDMA_M2S_Q_RATE_LIMIT_CFG_CYCLE_LONG_CYCLE_SIZE_SHIFT 0 + +/**** rate_limit_cfg_token_size_1 register ****/ +/* Number of bits to add in each long cycle */ +#define UDMA_M2S_Q_RATE_LIMIT_CFG_TOKEN_SIZE_1_LONG_CYCLE_MASK 0x0007FFFF +#define UDMA_M2S_Q_RATE_LIMIT_CFG_TOKEN_SIZE_1_LONG_CYCLE_SHIFT 0 + +/**** rate_limit_cfg_token_size_2 register ****/ +/* Number of bits to add in each cycle */ +#define UDMA_M2S_Q_RATE_LIMIT_CFG_TOKEN_SIZE_2_SHORT_CYCLE_MASK 0x0007FFFF +#define UDMA_M2S_Q_RATE_LIMIT_CFG_TOKEN_SIZE_2_SHORT_CYCLE_SHIFT 0 + +/**** rate_limit_sw_ctrl register ****/ +/* Reset the token bucket counter. */ +#define UDMA_M2S_Q_RATE_LIMIT_SW_CTRL_RST_TOKEN_CNT (1 << 0) + +/**** rate_limit_mask register ****/ +/* Mask the external rate limiter. */ +#define UDMA_M2S_Q_RATE_LIMIT_MASK_EXTERNAL_RATE_LIMITER (1 << 0) +/* Mask the internal rate limiter. */ +#define UDMA_M2S_Q_RATE_LIMIT_MASK_INTERNAL_RATE_LIMITER (1 << 1) +/* + * Mask the internal pause mechanism for DMB. + * (Data Memory Barrier). + */ +#define UDMA_M2S_Q_RATE_LIMIT_MASK_INTERNAL_PAUSE_DMB (1 << 2) +/* Mask the external application pause interface. */ +#define UDMA_M2S_Q_RATE_LIMIT_MASK_EXTERNAL_PAUSE (1 << 3) + +/**** dwrr_cfg_1 register ****/ +/* Maximum number of accumulated bytes in the deficit counter */ +#define UDMA_M2S_Q_DWRR_CFG_1_MAX_DEFICIT_CNT_SIZE_MASK 0x00FFFFFF +#define UDMA_M2S_Q_DWRR_CFG_1_MAX_DEFICIT_CNT_SIZE_SHIFT 0 +/* Bypass the DWRR. 
*/ +#define UDMA_M2S_Q_DWRR_CFG_1_STRICT (1 << 24) +/* Stop deficit counter increment. */ +#define UDMA_M2S_Q_DWRR_CFG_1_PAUSE (1 << 25) + +/**** dwrr_cfg_2 register ****/ +/* + * Value for the queue QoS. + * Queues with the same QoS value are scheduled with RR/DWRR. + * Only LOG(number of queues) is used. + */ +#define UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK 0x000000FF +#define UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_SHIFT 0 + +/**** dwrr_cfg_3 register ****/ +/* Queue weight */ +#define UDMA_M2S_Q_DWRR_CFG_3_WEIGHT_MASK 0x000000FF +#define UDMA_M2S_Q_DWRR_CFG_3_WEIGHT_SHIFT 0 + +/**** dwrr_sw_ctrl register ****/ +/* Reset the DWRR deficit counter. */ +#define UDMA_M2S_Q_DWRR_SW_CTRL_RST_CNT (1 << 0) + +/**** comp_cfg register ****/ +/* Enable writing to the completion ring */ +#define UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE (1 << 0) +/* Disable the completion coalescing function. */ +#define UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL (1 << 1) + +/**** q_sw_ctrl register ****/ +/* + * Reset the DMB hardware barrier + * (enable queue operation). + */ +#define UDMA_M2S_Q_SW_CTRL_RST_DMB (1 << 0) +/* Reset the tail pointer hardware. */ +#define UDMA_M2S_Q_SW_CTRL_RST_TAIL_PTR (1 << 1) +/* Reset the head pointer hardware. */ +#define UDMA_M2S_Q_SW_CTRL_RST_HEAD_PTR (1 << 2) +/* Reset the current pointer hardware. */ +#define UDMA_M2S_Q_SW_CTRL_RST_CURRENT_PTR (1 << 3) +/* Reset the queue */ +#define UDMA_M2S_Q_SW_CTRL_RST_Q (1 << 8) + +#ifdef __cplusplus +} +#endif + +#endif /* __AL_HAL_UDMA_M2S_REG_H */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_regs_s2m.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_regs_s2m.h new file mode 100644 index 00000000000000..75091b3b5a57e8 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_udma_regs_s2m.h @@ -0,0 +1,997 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @file al_hal_udma_regs_s2m.h + * + * @brief C Header file for the UDMA S2M registers + * + */ + +#ifndef __AL_HAL_UDMA_S2M_REG_H +#define __AL_HAL_UDMA_S2M_REG_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif +/* +* Unit Registers +*/ + + + +struct udma_axi_s2m { + /* [0x0] Data write master configuration */ + uint32_t data_wr_cfg_1; + /* [0x4] Data write master configuration */ + uint32_t data_wr_cfg_2; + /* [0x8] Descriptor read master configuration */ + uint32_t desc_rd_cfg_4; + /* [0xc] Descriptor read master configuration */ + uint32_t desc_rd_cfg_5; + /* [0x10] Completion write master configuration */ + uint32_t comp_wr_cfg_1; + /* [0x14] Completion write master configuration */ + uint32_t comp_wr_cfg_2; + /* [0x18] Data write master configuration */ + uint32_t data_wr_cfg; + /* [0x1c] Descriptors read master configuration */ + uint32_t desc_rd_cfg_3; + /* [0x20] Completion descriptors write master configuration */ + uint32_t desc_wr_cfg_1; + /* [0x24] AXI outstanding read configuration */ + uint32_t ostand_cfg_rd; + /* [0x28] AXI outstanding write configuration */ + uint32_t ostand_cfg_wr; + uint32_t rsrvd[53]; +}; +struct udma_s2m { + /* + * [0x0] DMA state + * 00 - No pending tasks + * 01 – Normal (active) + * 10 – Abort (error condition) + * 11 – Reserved + */ + uint32_t state; + /* [0x4] CPU request to change DMA state */ + uint32_t change_state; + uint32_t rsrvd_0; + /* + * [0xc] S2M DMA error log mask. + * Each error has an interrupt controller cause bit. + * This register determines if these errors cause the S2M DMA to log the + * error condition. + * 0 - Log is enable + * 1 - Log is masked. + */ + uint32_t err_log_mask; + uint32_t rsrvd_1; + /* + * [0x14] DMA header log + * Sample the packet header that caused the error + */ + uint32_t log_0; + /* + * [0x18] DMA header log + * Sample the packet header that caused the error. + */ + uint32_t log_1; + /* + * [0x1c] DMA header log + * Sample the packet header that caused the error. + */ + uint32_t log_2; + /* + * [0x20] DMA header log + * Sample the packet header that caused the error + */ + uint32_t log_3; + /* [0x24] DMA clear error log */ + uint32_t clear_err_log; + /* [0x28] S2M stream data FIFO status */ + uint32_t s_data_fifo_status; + /* [0x2c] S2M stream header FIFO status */ + uint32_t s_header_fifo_status; + /* [0x30] S2M AXI data FIFO status */ + uint32_t axi_data_fifo_status; + /* [0x34] S2M unack FIFO status */ + uint32_t unack_fifo_status; + /* [0x38] Select queue for debug */ + uint32_t indirect_ctrl; + /* + * [0x3c] S2M prefetch FIFO status. + * Status of the selected queue in S2M_indirect_ctrl + */ + uint32_t sel_pref_fifo_status; + /* + * [0x40] S2M completion FIFO status. 
+	 * Status of the selected queue in S2M_indirect_ctrl
+	 */
+	uint32_t sel_comp_fifo_status;
+	/* [0x44] S2M state machine and FIFO clear control */
+	uint32_t clear_ctrl;
+	/* [0x48] S2M Misc Check enable */
+	uint32_t check_en;
+	/* [0x4c] S2M FIFO enable control, internal */
+	uint32_t fifo_en;
+	/* [0x50] Stream interface configuration */
+	uint32_t stream_cfg;
+	uint32_t rsrvd[43];
+};
+struct udma_s2m_rd {
+	/* [0x0] S2M descriptor prefetch configuration */
+	uint32_t desc_pref_cfg_1;
+	/* [0x4] S2M descriptor prefetch configuration */
+	uint32_t desc_pref_cfg_2;
+	/* [0x8] S2M descriptor prefetch configuration */
+	uint32_t desc_pref_cfg_3;
+	/* [0xc] S2M descriptor prefetch configuration */
+	uint32_t desc_pref_cfg_4;
+	uint32_t rsrvd[12];
+};
+struct udma_s2m_wr {
+	/* [0x0] Stream data FIFO configuration */
+	uint32_t data_cfg_1;
+	/* [0x4] Data write configuration */
+	uint32_t data_cfg_2;
+	uint32_t rsrvd[14];
+};
+struct udma_s2m_comp {
+	/* [0x0] Completion controller configuration */
+	uint32_t cfg_1c;
+	/* [0x4] Completion controller configuration */
+	uint32_t cfg_2c;
+	uint32_t rsrvd_0;
+	/* [0xc] Completion controller application acknowledge configuration */
+	uint32_t cfg_application_ack;
+	uint32_t rsrvd[12];
+};
+struct udma_s2m_stat {
+	uint32_t rsrvd_0;
+	/* [0x4] Number of dropped packets */
+	uint32_t drop_pkt;
+	/*
+	 * [0x8] Counting the net length of the data buffers [64-bit]
+	 * Should be read before rx_bytes_high
+	 */
+	uint32_t rx_bytes_low;
+	/*
+	 * [0xc] Counting the net length of the data buffers [64-bit]
+	 * Should be read after rx_bytes_low (value is sampled when reading
+	 * rx_bytes_low)
+	 */
+	uint32_t rx_bytes_high;
+	/* [0x10] Total number of descriptors read from the host memory */
+	uint32_t prefed_desc;
+	/* [0x14] Number of packets written into the completion ring */
+	uint32_t comp_pkt;
+	/* [0x18] Number of descriptors written into the completion ring */
+	uint32_t comp_desc;
+	/*
+	 * [0x1c] Number of acknowledged packets.
+	 * (acknowledge sent to the stream interface)
+	 */
+	uint32_t ack_pkts;
+	uint32_t rsrvd[56];
+};
+struct udma_s2m_feature {
+	/*
+	 * [0x0] S2M Feature register
+	 * S2M instantiation parameters
+	 */
+	uint32_t reg_1;
+	/* [0x4] Reserved S2M feature register */
+	uint32_t reg_2;
+	/*
+	 * [0x8] S2M Feature register
+	 * S2M instantiation parameters
+	 */
+	uint32_t reg_3;
+	/*
+	 * [0xc] S2M Feature register.
+	 * S2M instantiation parameters.
+	 */
+	uint32_t reg_4;
+	/*
+	 * [0x10] S2M Feature register.
+	 * S2M instantiation parameters.
+	 */
+	uint32_t reg_5;
+	/* [0x14] S2M Feature register. S2M instantiation parameters.
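+	 * (Read-only. For example, the instantiated maximum number of
+	 * outstanding descriptor reads could be extracted as
+	 *	(readl(&regs->s2m_feature.reg_6) &
+	 *	 UDMA_S2M_FEATURE_REG_6_MAX_DESC_RD_OSTAND_MASK) >>
+	 *	 UDMA_S2M_FEATURE_REG_6_MAX_DESC_RD_OSTAND_SHIFT
+	 * with "regs" an assumed struct udma_s2m_regs pointer; the masks
+	 * are defined further below.)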
*/ + uint32_t reg_6; + uint32_t rsrvd[58]; +}; +struct udma_s2m_q { + uint32_t rsrvd_0[8]; + /* [0x20] S2M Descriptor ring configuration */ + uint32_t cfg; + /* [0x24] S2M Descriptor ring status and information */ + uint32_t status; + /* [0x28] Rx Descriptor Ring Base Pointer [31:4] */ + uint32_t rdrbp_low; + /* [0x2c] Rx Descriptor Ring Base Pointer [63:32] */ + uint32_t rdrbp_high; + /* + * [0x30] Rx Descriptor Ring Length[23:2] + */ + uint32_t rdrl; + /* [0x34] RX Descriptor Ring Head Pointer */ + uint32_t rdrhp; + /* [0x38] Rx Descriptor Tail Pointer increment */ + uint32_t rdrtp_inc; + /* [0x3c] Rx Descriptor Tail Pointer */ + uint32_t rdrtp; + /* [0x40] RX Descriptor Current Pointer */ + uint32_t rdcp; + /* [0x44] Rx Completion Ring Base Pointer [31:4] */ + uint32_t rcrbp_low; + /* [0x48] Rx Completion Ring Base Pointer [63:32] */ + uint32_t rcrbp_high; + /* [0x4c] Rx Completion Ring Head Pointer */ + uint32_t rcrhp; + /* + * [0x50] RX Completion Ring Head Pointer internal. + * (Before the coalescing FIFO) + */ + uint32_t rcrhp_internal; + /* [0x54] Completion controller configuration for the queue */ + uint32_t comp_cfg; + /* [0x58] Completion controller configuration for the queue */ + uint32_t comp_cfg_2; + /* [0x5c] Packet handler configuration */ + uint32_t pkt_cfg; + /* [0x60] Queue QoS configuration */ + uint32_t qos_cfg; + /* [0x64] DMB software control */ + uint32_t q_sw_ctrl; + /* [0x68] Number of S2M Rx packets after completion */ + uint32_t q_rx_pkt; + uint32_t rsrvd[997]; +}; + +struct udma_s2m_regs { + uint32_t rsrvd_0[64]; + struct udma_axi_s2m axi_s2m; /* [0x100] */ + struct udma_s2m s2m; /* [0x200] */ + struct udma_s2m_rd s2m_rd; /* [0x300] */ + struct udma_s2m_wr s2m_wr; /* [0x340] */ + struct udma_s2m_comp s2m_comp; /* [0x380] */ + uint32_t rsrvd_1[80]; + struct udma_s2m_stat s2m_stat; /* [0x500] */ + struct udma_s2m_feature s2m_feature; /* [0x600] */ + uint32_t rsrvd_2[576]; + struct udma_s2m_q s2m_q[4]; /* [0x1000] */ +}; + + +/* +* Registers Fields +*/ + + +/**** data_wr_cfg_1 register ****/ +/* AXI write ID (AWID) */ +#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWID_MASK 0x000000FF +#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWID_SHIFT 0 +/* Cache Type */ +#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWCACHE_MASK 0x000F0000 +#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWCACHE_SHIFT 16 +/* Burst type */ +#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWBURST_MASK 0x03000000 +#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWBURST_SHIFT 24 + +/**** data_wr_cfg_2 register ****/ +/* User extension */ +#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWUSER_MASK 0x000FFFFF +#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWUSER_SHIFT 0 +/* Bus size, 128-bit */ +#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWSIZE_MASK 0x00700000 +#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWSIZE_SHIFT 20 +/* + * AXI Master QoS. 
+ * Used for arbitration between AXI masters + */ +#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWQOS_MASK 0x07000000 +#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWQOS_SHIFT 24 +/* Protection Type */ +#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWPROT_MASK 0x70000000 +#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWPROT_SHIFT 28 + +/**** desc_rd_cfg_4 register ****/ +/* AXI read ID (ARID) */ +#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARID_MASK 0x000000FF +#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARID_SHIFT 0 +/* Cache Type */ +#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARCACHE_MASK 0x000F0000 +#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARCACHE_SHIFT 16 +/* Burst type */ +#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARBURST_MASK 0x03000000 +#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARBURST_SHIFT 24 + +/**** desc_rd_cfg_5 register ****/ +/* User extension */ +#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARUSER_MASK 0x000FFFFF +#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARUSER_SHIFT 0 +/* Bus size, 128-bit */ +#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARSIZE_MASK 0x00700000 +#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARSIZE_SHIFT 20 +/* + * AXI Master QoS. + * Used for arbitration between AXI masters + */ +#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARQOS_MASK 0x07000000 +#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARQOS_SHIFT 24 +/* Protection Type */ +#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARPROT_MASK 0x70000000 +#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARPROT_SHIFT 28 + +/**** comp_wr_cfg_1 register ****/ +/* AXI write ID (AWID) */ +#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_MASK 0x000000FF +#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_SHIFT 0 +/* Cache Type */ +#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_MASK 0x000F0000 +#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_SHIFT 16 +/* Burst type */ +#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_MASK 0x03000000 +#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_SHIFT 24 + +/**** comp_wr_cfg_2 register ****/ +/* User extension */ +#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_MASK 0x000FFFFF +#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_SHIFT 0 +/* Bus size, 128-bit */ +#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_MASK 0x00700000 +#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_SHIFT 20 +/* + * AXI Master QoS. + * Used for arbitration between AXI masters + */ +#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_MASK 0x07000000 +#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_SHIFT 24 +/* Protection Type */ +#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_MASK 0x70000000 +#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_SHIFT 28 + +/**** data_wr_cfg register ****/ +/* + * Defines the maximum number of AXI beats for a single AXI burst. This value is + * used for the burst split decision. + */ +#define UDMA_AXI_S2M_DATA_WR_CFG_MAX_AXI_BEATS_MASK 0x000000FF +#define UDMA_AXI_S2M_DATA_WR_CFG_MAX_AXI_BEATS_SHIFT 0 + +/**** desc_rd_cfg_3 register ****/ +/* + * Defines the maximum number of AXI beats for a single AXI burst. This value is + * used for the burst split decision. + */ +#define UDMA_AXI_S2M_DESC_RD_CFG_3_MAX_AXI_BEATS_MASK 0x000000FF +#define UDMA_AXI_S2M_DESC_RD_CFG_3_MAX_AXI_BEATS_SHIFT 0 +/* + * Enables breaking descriptor read request. + * Aligned to max_AXI_beats when the total read size is less than max_AXI_beats. + */ +#define UDMA_AXI_S2M_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY (1 << 16) + +/**** desc_wr_cfg_1 register ****/ +/* + * Defines the maximum number of AXI beats for a single AXI burst. This value is + * used for the burst split decision. + */ +#define UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK 0x000000FF +#define UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT 0 +/* + * Minimum burst for writing completion descriptors. 
+ * (AXI beats). + * Value must be aligned to cache lines (64 bytes). + * Default value is 2 cache lines, 8 beats. + */ +#define UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK 0x00FF0000 +#define UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT 16 + +/**** ostand_cfg_rd register ****/ +/* + * Maximum number of outstanding descriptor reads to the AXI. + * (AXI transactions). + */ +#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_MASK 0x0000003F +#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_SHIFT 0 +/* Maximum number of outstanding stream acknowledges. */ +#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_MASK 0x001F0000 +#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_SHIFT 16 + +/**** ostand_cfg_wr register ****/ +/* + * Maximum number of outstanding data writes to the AXI. + * (AXI transactions). + */ +#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_MASK 0x0000003F +#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_SHIFT 0 +/* + * Maximum number of outstanding data beats for data write to AXI. + * (AXI beats). + */ +#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_MASK 0x0000FF00 +#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_SHIFT 8 +/* + * Maximum number of outstanding descriptor writes to the AXI. + * (AXI transactions). + */ +#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_MASK 0x003F0000 +#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_SHIFT 16 +/* + * Maximum number of outstanding data beats for descriptor write to AXI. + * (AXI beats). + */ +#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_MASK 0xFF000000 +#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_SHIFT 24 + +/**** state register ****/ + +#define UDMA_S2M_STATE_COMP_CTRL_MASK 0x00000003 +#define UDMA_S2M_STATE_COMP_CTRL_SHIFT 0 + +#define UDMA_S2M_STATE_STREAM_IF_MASK 0x00000030 +#define UDMA_S2M_STATE_STREAM_IF_SHIFT 4 + +#define UDMA_S2M_STATE_DATA_WR_CTRL_MASK 0x00000300 +#define UDMA_S2M_STATE_DATA_WR_CTRL_SHIFT 8 + +#define UDMA_S2M_STATE_DESC_PREF_MASK 0x00003000 +#define UDMA_S2M_STATE_DESC_PREF_SHIFT 12 + +#define UDMA_S2M_STATE_AXI_WR_DATA_MASK 0x00030000 +#define UDMA_S2M_STATE_AXI_WR_DATA_SHIFT 16 + +/**** change_state register ****/ +/* Start normal operation */ +#define UDMA_S2M_CHANGE_STATE_NORMAL (1 << 0) +/* Stop normal operation */ +#define UDMA_S2M_CHANGE_STATE_DIS (1 << 1) +/* + * Stop all machines. 
+ * (Prefetch, scheduling, completion and stream interface) + */ +#define UDMA_S2M_CHANGE_STATE_ABORT (1 << 2) + +/**** clear_err_log register ****/ +/* Clear error log */ +#define UDMA_S2M_CLEAR_ERR_LOG_CLEAR (1 << 0) + +/**** s_data_fifo_status register ****/ +/* FIFO used indication */ +#define UDMA_S2M_S_DATA_FIFO_STATUS_USED_MASK 0x0000FFFF +#define UDMA_S2M_S_DATA_FIFO_STATUS_USED_SHIFT 0 +/* FIFO empty indication */ +#define UDMA_S2M_S_DATA_FIFO_STATUS_EMPTY (1 << 24) +/* FIFO full indication */ +#define UDMA_S2M_S_DATA_FIFO_STATUS_FULL (1 << 28) + +/**** s_header_fifo_status register ****/ +/* FIFO used indication */ +#define UDMA_S2M_S_HEADER_FIFO_STATUS_USED_MASK 0x0000FFFF +#define UDMA_S2M_S_HEADER_FIFO_STATUS_USED_SHIFT 0 +/* FIFO empty indication */ +#define UDMA_S2M_S_HEADER_FIFO_STATUS_EMPTY (1 << 24) +/* FIFO full indication */ +#define UDMA_S2M_S_HEADER_FIFO_STATUS_FULL (1 << 28) + +/**** axi_data_fifo_status register ****/ +/* FIFO used indication */ +#define UDMA_S2M_AXI_DATA_FIFO_STATUS_USED_MASK 0x0000FFFF +#define UDMA_S2M_AXI_DATA_FIFO_STATUS_USED_SHIFT 0 +/* FIFO empty indication */ +#define UDMA_S2M_AXI_DATA_FIFO_STATUS_EMPTY (1 << 24) +/* FIFO full indication */ +#define UDMA_S2M_AXI_DATA_FIFO_STATUS_FULL (1 << 28) + +/**** unack_fifo_status register ****/ +/* FIFO used indication */ +#define UDMA_S2M_UNACK_FIFO_STATUS_USED_MASK 0x0000FFFF +#define UDMA_S2M_UNACK_FIFO_STATUS_USED_SHIFT 0 +/* FIFO empty indication */ +#define UDMA_S2M_UNACK_FIFO_STATUS_EMPTY (1 << 24) +/* FIFO full indication */ +#define UDMA_S2M_UNACK_FIFO_STATUS_FULL (1 << 28) + +/**** indirect_ctrl register ****/ +/* Selected queue for status read */ +#define UDMA_S2M_INDIRECT_CTRL_Q_NUM_MASK 0x00000FFF +#define UDMA_S2M_INDIRECT_CTRL_Q_NUM_SHIFT 0 + +/**** sel_pref_fifo_status register ****/ +/* FIFO used indication */ +#define UDMA_S2M_SEL_PREF_FIFO_STATUS_USED_MASK 0x0000FFFF +#define UDMA_S2M_SEL_PREF_FIFO_STATUS_USED_SHIFT 0 +/* FIFO empty indication */ +#define UDMA_S2M_SEL_PREF_FIFO_STATUS_EMPTY (1 << 24) +/* FIFO full indication */ +#define UDMA_S2M_SEL_PREF_FIFO_STATUS_FULL (1 << 28) + +/**** sel_comp_fifo_status register ****/ +/* FIFO used indication */ +#define UDMA_S2M_SEL_COMP_FIFO_STATUS_USED_MASK 0x0000FFFF +#define UDMA_S2M_SEL_COMP_FIFO_STATUS_USED_SHIFT 0 +/* Coalescing ACTIVE FSM state indication. */ +#define UDMA_S2M_SEL_COMP_FIFO_STATUS_COAL_ACTIVE_STATE_MASK 0x00300000 +#define UDMA_S2M_SEL_COMP_FIFO_STATUS_COAL_ACTIVE_STATE_SHIFT 20 +/* FIFO empty indication */ +#define UDMA_S2M_SEL_COMP_FIFO_STATUS_EMPTY (1 << 24) +/* FIFO full indication */ +#define UDMA_S2M_SEL_COMP_FIFO_STATUS_FULL (1 << 28) + +/**** stream_cfg register ****/ +/* + * Disables the stream interface operation. + * Changing to 1 stops at the end of packet reception. + */ +#define UDMA_S2M_STREAM_CFG_DISABLE (1 << 0) +/* + * Flush the stream interface operation. + * Changing to 1 stops at the end of packet reception and assert ready to the + * stream I/F. + */ +#define UDMA_S2M_STREAM_CFG_FLUSH (1 << 4) +/* Stop descriptor prefetch when the stream is disabled and the S2M is idle. */ +#define UDMA_S2M_STREAM_CFG_STOP_PREFETCH (1 << 8) + +/**** desc_pref_cfg_1 register ****/ +/* + * Size of the descriptor prefetch FIFO. 
+ * (descriptors) + */ +#define UDMA_S2M_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK 0x000000FF +#define UDMA_S2M_RD_DESC_PREF_CFG_1_FIFO_DEPTH_SHIFT 0 + +/**** desc_pref_cfg_2 register ****/ +/* Enable promotion of the current queue in progress */ +#define UDMA_S2M_RD_DESC_PREF_CFG_2_Q_PROMOTION (1 << 0) +/* Force promotion of the current queue in progress */ +#define UDMA_S2M_RD_DESC_PREF_CFG_2_FORCE_PROMOTION (1 << 1) +/* Enable prefetch prediction of next packet in line. */ +#define UDMA_S2M_RD_DESC_PREF_CFG_2_EN_PREF_PREDICTION (1 << 2) +/* + * Threshold for queue promotion. + * Queue is promoted for prefetch if there are less descriptors in the prefetch + * FIFO than the threshold + */ +#define UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_MASK 0x0000FF00 +#define UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_SHIFT 8 +/* + * Force RR arbitration in the prefetch arbiter. + * 0 - Standard arbitration based on queue QoS + * 1 - Force round robin arbitration + */ +#define UDMA_S2M_RD_DESC_PREF_CFG_2_PREF_FORCE_RR (1 << 16) + +/**** desc_pref_cfg_3 register ****/ +/* + * Minimum descriptor burst size when prefetch FIFO level is below the + * descriptor prefetch threshold + * (must be 1) + */ +#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK 0x0000000F +#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_SHIFT 0 +/* + * Minimum descriptor burst size when prefetch FIFO level is above the + * descriptor prefetch threshold + */ +#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK 0x000000F0 +#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT 4 +/* + * Descriptor fetch threshold. + * Used as a threshold to determine the allowed minimum descriptor burst size. + * (Must be at least "max_desc_per_pkt") + */ +#define UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK 0x0000FF00 +#define UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT 8 + +/**** desc_pref_cfg_4 register ****/ +/* + * Used as a threshold for generating almost FULL indication to the application + */ +#define UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_MASK 0x000000FF +#define UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_SHIFT 0 + +/**** data_cfg_1 register ****/ +/* + * Maximum number of data beats in the data write FIFO. + * Defined based on data FIFO size + * (default FIFO size 512B → 32 beats) + */ +#define UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_MASK 0x000003FF +#define UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_SHIFT 0 +/* + * Maximum number of packets in the data write FIFO. + * Defined based on header FIFO size + */ +#define UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_MASK 0x00FF0000 +#define UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_SHIFT 16 +/* + * Internal use + * Data FIFO margin + */ +#define UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_MASK 0xFF000000 +#define UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_SHIFT 24 + +/**** data_cfg_2 register ****/ +/* + * Drop timer. + * Waiting time for the host to write new descriptor to the queue + * (for the current packet in process) + */ +#define UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK 0x00FFFFFF +#define UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_SHIFT 0 +/* + * Drop enable. + * Enable packet drop if there are no available descriptors in the system for + * this queue + */ +#define UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC (1 << 27) +/* + * Lack of descriptors hint. 
+ * Generate interrupt when a packet is waiting but there are no available + * descriptors in the queue + */ +#define UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC (1 << 28) +/* + * Drop conditions + * Wait until a descriptor is available in the prefetch FIFO or the host before + * dropping packet. + * 1 - Drop if a descriptor is not available in the prefetch. + * 0 - Drop if a descriptor is not available in the system + */ +#define UDMA_S2M_WR_DATA_CFG_2_WAIT_FOR_PREF (1 << 29) +/* + * DRAM write optimization + * 0 - Data write with byte enable + * 1 - Data write is always in Full AXI bus width (128 bit) + */ +#define UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE (1 << 30) +/* + * Direct data write address + * 1 - Use buffer 1 instead of buffer 2 when direct data placement is used with + * header split. + * 0 - Use buffer 2 for the header. + */ +#define UDMA_S2M_WR_DATA_CFG_2_DIRECT_HDR_USE_BUF1 (1 << 31) + +/**** cfg_1c register ****/ +/* + * Completion descriptor size. + * (words) + */ +#define UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK 0x0000000F +#define UDMA_S2M_COMP_CFG_1C_DESC_SIZE_SHIFT 0 +/* + * Completion queue counter configuration. + * Completion FIFO in use counter measured in words or descriptors + * 1 - Words + * 0 - Descriptors + */ +#define UDMA_S2M_COMP_CFG_1C_CNT_WORDS (1 << 8) +/* + * Enable promotion of the current queue in progress in the completion write + * scheduler. + */ +#define UDMA_S2M_COMP_CFG_1C_Q_PROMOTION (1 << 12) +/* Force RR arbitration in the completion arbiter */ +#define UDMA_S2M_COMP_CFG_1C_FORCE_RR (1 << 16) +/* Minimum number of free completion entries to qualify for promotion */ +#define UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_MASK 0xF0000000 +#define UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_SHIFT 28 + +/**** cfg_2c register ****/ +/* + * Completion FIFO size. + * (words per queue) + */ +#define UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_MASK 0x00000FFF +#define UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_SHIFT 0 +/* + * Unacknowledged FIFO size. + * (descriptors) + */ +#define UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_MASK 0x0FFF0000 +#define UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_SHIFT 16 + +/**** reg_1 register ****/ +/* + * Descriptor prefetch FIFO size + * (descriptors) + */ +#define UDMA_S2M_FEATURE_REG_1_DESC_PREFERCH_FIFO_DEPTH_MASK 0x000000FF +#define UDMA_S2M_FEATURE_REG_1_DESC_PREFERCH_FIFO_DEPTH_SHIFT 0 + +/**** reg_3 register ****/ +/* + * Maximum number of data beats in the data write FIFO. + * Defined based on data FIFO size + * (default FIFO size 512B →32 beats) + */ +#define UDMA_S2M_FEATURE_REG_3_DATA_FIFO_DEPTH_MASK 0x000003FF +#define UDMA_S2M_FEATURE_REG_3_DATA_FIFO_DEPTH_SHIFT 0 +/* + * Maximum number of packets in the data write FIFO. + * Defined based on header FIFO size + */ +#define UDMA_S2M_FEATURE_REG_3_DATA_WR_MAX_PKT_LIMIT_MASK 0x00FF0000 +#define UDMA_S2M_FEATURE_REG_3_DATA_WR_MAX_PKT_LIMIT_SHIFT 16 + +/**** reg_4 register ****/ +/* + * Completion FIFO size. + * (words per queue) + */ +#define UDMA_S2M_FEATURE_REG_4_COMP_FIFO_DEPTH_MASK 0x00000FFF +#define UDMA_S2M_FEATURE_REG_4_COMP_FIFO_DEPTH_SHIFT 0 +/* + * Unacknowledged FIFO size. + * (descriptors) + */ +#define UDMA_S2M_FEATURE_REG_4_COMP_UNACK_FIFO_DEPTH_MASK 0x0FFF0000 +#define UDMA_S2M_FEATURE_REG_4_COMP_UNACK_FIFO_DEPTH_SHIFT 16 + +/**** reg_5 register ****/ +/* Maximum number of outstanding data writes to the AXI */ +#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_WR_OSTAND_MASK 0x0000003F +#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_WR_OSTAND_SHIFT 0 +/* + * Maximum number of outstanding data beats for data write to AXI. 
+ * (AXI beats) + */ +#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_BEATS_WR_OSTAND_MASK 0x0000FF00 +#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_BEATS_WR_OSTAND_SHIFT 8 +/* + * Maximum number of outstanding descriptor reads to the AXI. + * (AXI transactions) + */ +#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_REQ_MASK 0x003F0000 +#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_REQ_SHIFT 16 +/* + * Maximum number of outstanding data beats for descriptor write to AXI. + * (AXI beats) + */ +#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_MASK 0xFF000000 +#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_SHIFT 24 + +/**** reg_6 register ****/ +/* Maximum number of outstanding descriptor reads to the AXI */ +#define UDMA_S2M_FEATURE_REG_6_MAX_DESC_RD_OSTAND_MASK 0x0000003F +#define UDMA_S2M_FEATURE_REG_6_MAX_DESC_RD_OSTAND_SHIFT 0 +/* Maximum number of outstanding stream acknowledges */ +#define UDMA_S2M_FEATURE_REG_6_MAX_STREAM_ACK_MASK 0x001F0000 +#define UDMA_S2M_FEATURE_REG_6_MAX_STREAM_ACK_SHIFT 16 + +/**** cfg register ****/ +/* + * Configure the AXI AWCACHE + * for header write. + */ +#define UDMA_S2M_Q_CFG_AXI_AWCACHE_HDR_MASK 0x0000000F +#define UDMA_S2M_Q_CFG_AXI_AWCACHE_HDR_SHIFT 0 +/* + * Configure the AXI AWCACHE + * for data write. + */ +#define UDMA_S2M_Q_CFG_AXI_AWCACHE_DATA_MASK 0x000000F0 +#define UDMA_S2M_Q_CFG_AXI_AWCACHE_DATA_SHIFT 4 +/* + * Enable operation of this queue. + * Start prefetch. + */ +#define UDMA_S2M_Q_CFG_EN_PREF (1 << 16) +/* Enables the reception of packets from the stream to this queue */ +#define UDMA_S2M_Q_CFG_EN_STREAM (1 << 17) +/* Allow prefetch of less than minimum prefetch burst size. */ +#define UDMA_S2M_Q_CFG_ALLOW_LT_MIN_PREF (1 << 20) +/* + * Configure the AXI AWCACHE + * for completion descriptor write + */ +#define UDMA_S2M_Q_CFG_AXI_AWCACHE_COMP_MASK 0x0F000000 +#define UDMA_S2M_Q_CFG_AXI_AWCACHE_COMP_SHIFT 24 +/* + * AXI QoS + * This value is used in AXI transactions associated with this queue and the + * prefetch and completion arbiters. + */ +#define UDMA_S2M_Q_CFG_AXI_QOS_MASK 0x70000000 +#define UDMA_S2M_Q_CFG_AXI_QOS_SHIFT 28 + +/**** status register ****/ +/* Indicates how many entries are used in the Queue */ +#define UDMA_S2M_Q_STATUS_Q_USED_MASK 0x01FFFFFF +#define UDMA_S2M_Q_STATUS_Q_USED_SHIFT 0 +/* + * prefetch status + * 0 – prefetch operation is stopped + * 1 – prefetch is operational + */ +#define UDMA_S2M_Q_STATUS_PREFETCH (1 << 28) +/* + * Queue receive status + * 0 -queue RX operation is stopped + * 1 – RX queue is active and processing packets + */ +#define UDMA_S2M_Q_STATUS_RX (1 << 29) +/* + * Indicates if the queue is full. + * (Used by the host when head pointer equals tail pointer) + */ +#define UDMA_S2M_Q_STATUS_Q_FULL (1 << 31) +/* + * S2M Descriptor Ring Base address [31:4]. + * Value of the base address of the S2M descriptor ring + * [3:0] - 0 - 16B alignment is enforced + * ([11:4] should be 0 for 4KB alignment) + */ +#define UDMA_S2M_Q_RDRBP_LOW_ADDR_MASK 0xFFFFFFF0 +#define UDMA_S2M_Q_RDRBP_LOW_ADDR_SHIFT 4 + +/**** RDRL register ****/ +/* + * Length of the descriptor ring. + * (descriptors) + * Associated with the ring base address ends at maximum burst size alignment + */ +#define UDMA_S2M_Q_RDRL_OFFSET_MASK 0x00FFFFFF +#define UDMA_S2M_Q_RDRL_OFFSET_SHIFT 0 + +/**** RDRHP register ****/ +/* + * Relative offset of the next descriptor that needs to be read into the + * prefetch FIFO. + * Incremented when the DMA reads valid descriptors from the host memory to the + * prefetch FIFO. 
+ * Note that this is the offset in # of descriptors and not in byte address. + */ +#define UDMA_S2M_Q_RDRHP_OFFSET_MASK 0x00FFFFFF +#define UDMA_S2M_Q_RDRHP_OFFSET_SHIFT 0 +/* Ring ID */ +#define UDMA_S2M_Q_RDRHP_RING_ID_MASK 0xC0000000 +#define UDMA_S2M_Q_RDRHP_RING_ID_SHIFT 30 + +/**** RDRTP_inc register ****/ +/* + * Increments the value in Q_RDRTP with the value written to this field in + * number of descriptors. + */ +#define UDMA_S2M_Q_RDRTP_INC_VAL_MASK 0x00FFFFFF +#define UDMA_S2M_Q_RDRTP_INC_VAL_SHIFT 0 + +/**** RDRTP register ****/ +/* + * Relative offset of the next free descriptor in the host memory. + * Note that this is the offset in # of descriptors and not in byte address. + */ +#define UDMA_S2M_Q_RDRTP_OFFSET_MASK 0x00FFFFFF +#define UDMA_S2M_Q_RDRTP_OFFSET_SHIFT 0 +/* Ring ID */ +#define UDMA_S2M_Q_RDRTP_RING_ID_MASK 0xC0000000 +#define UDMA_S2M_Q_RDRTP_RING_ID_SHIFT 30 + +/**** RDCP register ****/ +/* Relative offset of the first descriptor in the prefetch FIFO. */ +#define UDMA_S2M_Q_RDCP_OFFSET_MASK 0x00FFFFFF +#define UDMA_S2M_Q_RDCP_OFFSET_SHIFT 0 +/* Ring ID */ +#define UDMA_S2M_Q_RDCP_RING_ID_MASK 0xC0000000 +#define UDMA_S2M_Q_RDCP_RING_ID_SHIFT 30 +/* + * S2M Descriptor Ring Base address [31:4]. + * Value of the base address of the S2M descriptor ring + * [3:0] - 0 - 16B alignment is enforced + * ([11:4] Must be 0 for 4KB alignment) + * NOTE: + * Length of the descriptor ring (in descriptors) associated with the ring base + * address ends at maximum burst size alignment + */ +#define UDMA_S2M_Q_RCRBP_LOW_ADDR_MASK 0xFFFFFFF0 +#define UDMA_S2M_Q_RCRBP_LOW_ADDR_SHIFT 4 + +/**** RCRHP register ****/ +/* + * Relative offset of the next descriptor that needs to be updated by the + * completion controller. + * Note: This is in descriptors and not in byte address. + */ +#define UDMA_S2M_Q_RCRHP_OFFSET_MASK 0x00FFFFFF +#define UDMA_S2M_Q_RCRHP_OFFSET_SHIFT 0 +/* Ring ID */ +#define UDMA_S2M_Q_RCRHP_RING_ID_MASK 0xC0000000 +#define UDMA_S2M_Q_RCRHP_RING_ID_SHIFT 30 + +/**** RCRHP_internal register ****/ +/* + * Relative offset of the next descriptor that needs to be updated by the + * completion controller. + * Note: This is in descriptors and not in byte address. + */ +#define UDMA_S2M_Q_RCRHP_INTERNAL_OFFSET_MASK 0x00FFFFFF +#define UDMA_S2M_Q_RCRHP_INTERNAL_OFFSET_SHIFT 0 +/* Ring ID */ +#define UDMA_S2M_Q_RCRHP_INTERNAL_RING_ID_MASK 0xC0000000 +#define UDMA_S2M_Q_RCRHP_INTERNAL_RING_ID_SHIFT 30 + +/**** comp_cfg register ****/ +/* Enables writing to the completion ring. */ +#define UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE (1 << 0) +/* Disables the completion coalescing function. */ +#define UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL (1 << 1) +/* Reserved */ +#define UDMA_S2M_Q_COMP_CFG_FIRST_PKT_PROMOTION (1 << 2) +/* + * Buffer 2 location. + * Determines the position of the buffer 2 length in the S2M completion + * descriptor. + * 0 - WORD 1 [31:16] + * 1 - WORD 2 [31:16] + */ +#define UDMA_S2M_Q_COMP_CFG_BUF2_LEN_LOCATION (1 << 3) + +/**** pkt_cfg register ****/ +/* Header size. (bytes) */ +#define UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK 0x0000FFFF +#define UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_SHIFT 0 +/* Force header split */ +#define UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT (1 << 16) +/* Enable header split. */ +#define UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT (1 << 17) + +/**** qos_cfg register ****/ +/* Queue QoS */ +#define UDMA_S2M_QOS_CFG_Q_QOS_MASK 0x000000FF +#define UDMA_S2M_QOS_CFG_Q_QOS_SHIFT 0 +/* Reset the tail pointer hardware. 
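+ *
+ * (Sketch, "q" being an assumed struct udma_s2m_q pointer: a full
+ * software reset of the queue pointers could combine the bits defined
+ * below, e.g.
+ *	writel(UDMA_S2M_Q_SW_CTRL_RST_TAIL_PTR |
+ *	       UDMA_S2M_Q_SW_CTRL_RST_HEAD_PTR |
+ *	       UDMA_S2M_Q_SW_CTRL_RST_CURRENT_PTR |
+ *	       UDMA_S2M_Q_SW_CTRL_RST_PREFETCH, &q->q_sw_ctrl);
+ * before re-enabling the queue.)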
*/ +#define UDMA_S2M_Q_SW_CTRL_RST_TAIL_PTR (1 << 1) +/* Reset the head pointer hardware. */ +#define UDMA_S2M_Q_SW_CTRL_RST_HEAD_PTR (1 << 2) +/* Reset the current pointer hardware. */ +#define UDMA_S2M_Q_SW_CTRL_RST_CURRENT_PTR (1 << 3) +/* Reset the prefetch FIFO */ +#define UDMA_S2M_Q_SW_CTRL_RST_PREFETCH (1 << 4) +/* Reset the queue */ +#define UDMA_S2M_Q_SW_CTRL_RST_Q (1 << 8) + +#ifdef __cplusplus +} +#endif + +#endif /* __AL_HAL_UDMA_S2M_REG_H */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_unit_adapter_regs.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_unit_adapter_regs.h new file mode 100644 index 00000000000000..234c2cad05d0f3 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/al_hal_unit_adapter_regs.h @@ -0,0 +1,266 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +#ifndef __AL_HAL_UNIT_ADAPTER_REGS_H__ +#define __AL_HAL_UNIT_ADAPTER_REGS_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#define AL_PCI_COMMAND 0x04 /* 16 bits */ +#define AL_PCI_COMMAND_IO 0x1 /* Enable response in I/O space */ +#define AL_PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */ +#define AL_PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */ + +#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8 revision */ + +#define AL_PCI_BASE_ADDRESS_SPACE_IO 0x01 +#define AL_PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */ +#define AL_PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? 
*/ +#define AL_PCI_BASE_ADDRESS_DEVICE_ID 0x0c +#define AL_PCI_BASE_ADDRESS_0 0x10 /* 32 bits */ + +#define AL_PCI_BASE_ADDRESS_2 0x18 +#define AL_PCI_BASE_ADDRESS_4 0x20 + +#define AL_PCI_AXI_CFG_AND_CTR_0 0x110 +#define AL_PCI_AXI_CFG_AND_CTR_1 0x130 +#define AL_PCI_AXI_CFG_AND_CTR_2 0x150 +#define AL_PCI_AXI_CFG_AND_CTR_3 0x170 + +#define AL_PCI_APP_CONTROL 0x220 + +#define AL_PCI_SRIOV_TOTAL_AND_INITIAL_VFS 0x30c + +#define AL_PCI_VF_BASE_ADDRESS_0 0x324 + + +#define AL_PCI_EXP_CAP_BASE 0x40 +#define AL_PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define AL_PCI_EXP_DEVCAP_PAYLOAD 0x07 /* Max_Payload_Size */ +#define AL_PCI_EXP_DEVCAP_PHANTOM 0x18 /* Phantom functions */ +#define AL_PCI_EXP_DEVCAP_EXT_TAG 0x20 /* Extended tags */ +#define AL_PCI_EXP_DEVCAP_L0S 0x1c0 /* L0s Acceptable Latency */ +#define AL_PCI_EXP_DEVCAP_L1 0xe00 /* L1 Acceptable Latency */ +#define AL_PCI_EXP_DEVCAP_ATN_BUT 0x1000 /* Attention Button Present */ +#define AL_PCI_EXP_DEVCAP_ATN_IND 0x2000 /* Attention Indicator Present */ +#define AL_PCI_EXP_DEVCAP_PWR_IND 0x4000 /* Power Indicator Present */ +#define AL_PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */ +#define AL_PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */ +#define AL_PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */ +#define AL_PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ +#define AL_PCI_EXP_DEVCTL 8 /* Device Control */ +#define AL_PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */ +#define AL_PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */ +#define AL_PCI_EXP_DEVCTL_FERE 0x0004 /* Fatal Error Reporting Enable */ +#define AL_PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. */ +#define AL_PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */ +#define AL_PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */ +#define AL_PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */ +#define AL_PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */ +#define AL_PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */ +#define AL_PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */ +#define AL_PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */ +#define AL_PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ +#define AL_PCI_EXP_DEVSTA 0xA /* Device Status */ +#define AL_PCI_EXP_DEVSTA_CED 0x01 /* Correctable Error Detected */ +#define AL_PCI_EXP_DEVSTA_NFED 0x02 /* Non-Fatal Error Detected */ +#define AL_PCI_EXP_DEVSTA_FED 0x04 /* Fatal Error Detected */ +#define AL_PCI_EXP_DEVSTA_URD 0x08 /* Unsupported Request Detected */ +#define AL_PCI_EXP_DEVSTA_AUXPD 0x10 /* AUX Power Detected */ +#define AL_PCI_EXP_DEVSTA_TRPND 0x20 /* Transactions Pending */ +#define AL_PCI_EXP_LNKCAP 0xC /* Link Capabilities */ +#define AL_PCI_EXP_LNKCAP_SLS 0xf /* Supported Link Speeds */ +#define AL_PCI_EXP_LNKCAP_SLS_2_5GB 0x1 /* LNKCAP2 SLS Vector bit 0 (2.5GT/s) */ +#define AL_PCI_EXP_LNKCAP_SLS_5_0GB 0x2 /* LNKCAP2 SLS Vector bit 1 (5.0GT/s) */ +#define AL_PCI_EXP_LNKCAP_MLW 0x3f0 /* Maximum Link Width */ +#define AL_PCI_EXP_LNKCAP_ASPMS 0xc00 /* ASPM Support */ +#define AL_PCI_EXP_LNKCAP_L0SEL 0x7000 /* L0s Exit Latency */ +#define AL_PCI_EXP_LNKCAP_L1EL 0x38000 /* L1 Exit Latency */ +#define AL_PCI_EXP_LNKCAP_CLKPM 0x40000 /* L1 Clock Power Management */ +#define AL_PCI_EXP_LNKCAP_SDERC 0x80000 /* Surprise Down Error Reporting Capable */ +#define AL_PCI_EXP_LNKCAP_DLLLARC 0x100000 /* Data Link Layer Link Active Reporting Capable */ 
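The multi-bit fields in this header are expressed as mask/shift pairs over the
standard PCI Express register layout. As a minimal sketch of how one such field
is decoded (the helper name is illustrative and not part of the patch; the
128 << n encoding of Max_Payload_Size is the standard PCIe one):

	/* Sketch: decode Max_Payload_Size (DEVCTL bits 7:5) into bytes */
	static inline unsigned int al_devctl_max_payload_bytes(uint32_t devctl)
	{
		unsigned int field = (devctl & AL_PCI_EXP_DEVCTL_PAYLOAD) >> 5;

		return 128U << field;	/* 0 -> 128B ... 5 -> 4096B */
	}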
+#define AL_PCI_EXP_LNKCAP_LBNC	0x200000 /* Link Bandwidth Notification Capability */
+#define AL_PCI_EXP_LNKCAP_PN	0xff000000 /* Port Number */
+
+#define AL_PCI_EXP_LNKSTA	0x12 /* Link Status */
+#define AL_PCI_EXP_LNKSTA_CLS	0x000f /* Current Link Speed */
+#define AL_PCI_EXP_LNKSTA_CLS_2_5GB 0x01 /* Current Link Speed 2.5GT/s */
+#define AL_PCI_EXP_LNKSTA_CLS_5_0GB 0x02 /* Current Link Speed 5.0GT/s */
+#define AL_PCI_EXP_LNKSTA_CLS_8_0GB 0x03 /* Current Link Speed 8.0GT/s */
+#define AL_PCI_EXP_LNKSTA_NLW	0x03f0 /* Negotiated Link Width */
+#define AL_PCI_EXP_LNKSTA_NLW_SHIFT 4 /* start of NLW mask in link status */
+#define AL_PCI_EXP_LNKSTA_LT	0x0800 /* Link Training */
+#define AL_PCI_EXP_LNKSTA_SLC	0x1000 /* Slot Clock Configuration */
+#define AL_PCI_EXP_LNKSTA_DLLLA	0x2000 /* Data Link Layer Link Active */
+#define AL_PCI_EXP_LNKSTA_LBMS	0x4000 /* Link Bandwidth Management Status */
+#define AL_PCI_EXP_LNKSTA_LABS	0x8000 /* Link Autonomous Bandwidth Status */
+
+#define AL_PCI_EXP_LNKCTL2	0x30 /* Link Control 2 */
+
+#define AL_PCI_MSIX_MSGCTRL	0 /* MSIX message control reg */
+#define AL_PCI_MSIX_MSGCTRL_TBL_SIZE	0x7ff /* MSIX table size */
+#define AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT	16 /* MSIX table size shift */
+#define AL_PCI_MSIX_MSGCTRL_EN	0x80000000 /* MSIX enable */
+#define AL_PCI_MSIX_MSGCTRL_MASK	0x40000000 /* MSIX mask */
+
+#define AL_PCI_MSIX_TABLE	0x4 /* MSIX table offset and bar reg */
+#define AL_PCI_MSIX_TABLE_OFFSET	0xfffffff8 /* MSIX table offset */
+#define AL_PCI_MSIX_TABLE_BAR	0x7 /* MSIX table BAR */
+
+#define AL_PCI_MSIX_PBA	0x8 /* MSIX pba offset and bar reg */
+#define AL_PCI_MSIX_PBA_OFFSET	0xfffffff8 /* MSIX pba offset */
+#define AL_PCI_MSIX_PBA_BAR	0x7 /* MSIX pba BAR */
+
+
+/* Adapter power management register 0 */
+#define AL_ADAPTER_PM_0	0x80
+#define AL_ADAPTER_PM_0_PM_NEXT_CAP_MASK	0xff00
+#define AL_ADAPTER_PM_0_PM_NEXT_CAP_SHIFT	8
+#define AL_ADAPTER_PM_0_PM_NEXT_CAP_VAL_MSIX	0x90
+
+/* Adapter power management register 1 */
+#define AL_ADAPTER_PM_1	0x84
+#define AL_ADAPTER_PM_1_PME_EN	0x100 /* PM enable */
+#define AL_ADAPTER_PM_1_PWR_STATE_MASK	0x3 /* PM state mask */
+#define AL_ADAPTER_PM_1_PWR_STATE_D3	0x3 /* PM D3 state */
+
+/*
+ * Generic control register
+ */
+#define AL_ADAPTER_SMCC	0x110 /* Sub Master Configuration & Control */
+#define AL_ADAPTER_GENERIC_CONTROL_0	0x1E0
+/* Enable clock gating */
+#define AL_ADAPTER_GENERIC_CONTROL_0_CLK_GATE_EN	0x01
+/* When set, all transactions through the PCI conf & mem BARs get timeout */
+#define AL_ADAPTER_GENERIC_CONTROL_0_ADAPTER_DIS	0x40
+#define AL_ADAPTER_GENERIC_CONTROL_11	0x220 /* Generic Control registers */
+
+#define AL_ADAPTER_GENERIC_CONTROL_0_ETH_RESET_1GMAC	AL_BIT(18)
+#define AL_ADAPTER_GENERIC_CONTROL_0_ETH_RESET_1GMAC_ON_FLR	AL_BIT(26)
+
+#define AL_ADAPTER_GENERIC_CONTROL_2	0x1E8
+#define AL_ADAPTER_GENERIC_CONTROL_3	0x1EC
+
+/*
+ * Unit adapter inline functions
+ */
+
+/**
+ * Performs a function level reset and takes care of all needed PCIe config
+ * space register save and restore.
+ * Uses the provided callbacks for reading/writing the pcie config space and
+ * for performing the actual reset.
+ *
+ * @param pcie_read_config_u32
+ *	pointer to function that reads a register from pcie config space
+ * @param pcie_write_config_u32
+ *	pointer to function that writes a register to pcie config space
+ *
+ * @param pcie_flr
+ *	pointer to function that performs the actual reset.
+ *	That function is responsible for performing the post reset
+ *	delay.
+ * + * @param handle + * pointer passes to the above functions as first parameter + */ +static inline void al_pcie_perform_flr(int (* pcie_read_config_u32)(void *handle, int where, uint32_t *val), + int (* pcie_write_config_u32)(void *handle, int where, uint32_t val), + int (* pcie_flr)(void *handle), + void *handle) +{ + int i; + uint32_t cfg_reg_store[11]; + + i = 0; + pcie_read_config_u32(handle, AL_PCI_COMMAND, + &cfg_reg_store[i++]); + pcie_read_config_u32(handle, AL_PCI_BASE_ADDRESS_DEVICE_ID, + &cfg_reg_store[i++]); + pcie_read_config_u32(handle, AL_PCI_BASE_ADDRESS_0, + &cfg_reg_store[i++]); + pcie_read_config_u32(handle, AL_PCI_BASE_ADDRESS_2, + &cfg_reg_store[i++]); + pcie_read_config_u32(handle, AL_PCI_BASE_ADDRESS_4, + &cfg_reg_store[i++]); + pcie_read_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_0, + &cfg_reg_store[i++]); + pcie_read_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_1, + &cfg_reg_store[i++]); + pcie_read_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_2, + &cfg_reg_store[i++]); + pcie_read_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_3, + &cfg_reg_store[i++]); + pcie_read_config_u32(handle, AL_PCI_APP_CONTROL, + &cfg_reg_store[i++]); + pcie_read_config_u32(handle, AL_PCI_VF_BASE_ADDRESS_0, + &cfg_reg_store[i++]); + + pcie_flr(handle); + + i = 0; + pcie_write_config_u32(handle, AL_PCI_COMMAND, + cfg_reg_store[i++]); + pcie_write_config_u32(handle, AL_PCI_BASE_ADDRESS_DEVICE_ID, + cfg_reg_store[i++]); + pcie_write_config_u32(handle, AL_PCI_BASE_ADDRESS_0, + cfg_reg_store[i++]); + pcie_write_config_u32(handle, AL_PCI_BASE_ADDRESS_2, + cfg_reg_store[i++]); + pcie_write_config_u32(handle, AL_PCI_BASE_ADDRESS_4, + cfg_reg_store[i++]); + pcie_write_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_0, + cfg_reg_store[i++]); + pcie_write_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_1, + cfg_reg_store[i++]); + pcie_write_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_2, + cfg_reg_store[i++]); + pcie_write_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_3, + cfg_reg_store[i++]); + pcie_write_config_u32(handle, AL_PCI_APP_CONTROL, + cfg_reg_store[i++]); + pcie_write_config_u32(handle, AL_PCI_VF_BASE_ADDRESS_0, + cfg_reg_store[i++]); +} + + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/alpine_machine.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/alpine_machine.h new file mode 100644 index 00000000000000..3394b10738e44a --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/alpine_machine.h @@ -0,0 +1,66 @@ +/* + * linux/arch/arm/mach-alpine/include/mach/alpine_machine.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ALPINE_MACHINE_H__ +#define __ALPINE_MACHINE_H__ + +/* Get SerDes group regs base, to be used in relevant Alpine drivers. + * Valid group is 0..3. + * Returns virtual base address of the group regs base. 
*/ +void __iomem *alpine_serdes_resource_get(u32 group); + +/* SerDes ETH mode */ +enum alpine_serdes_eth_mode { + ALPINE_SERDES_ETH_MODE_SGMII, + ALPINE_SERDES_ETH_MODE_KR, +}; + +/* + * Set SerDes ETH mode for an entire group, unless already set + * Valid group is 0..3. + * Returns 0 upon success. + */ +int alpine_serdes_eth_mode_set( + u32 group, + enum alpine_serdes_eth_mode mode); + +/* Lock the all serdes group for using common registers */ +void alpine_serdes_eth_group_lock(u32 group); + +/* Unlock the all serdes group for using common registers */ +void alpine_serdes_eth_group_unlock(u32 group); + +/* Alpine CPU Power Management Services Initialization */ +void __init alpine_cpu_pm_init(void); + +/* Determine whether Alpine CPU PM services are available */ +int alpine_cpu_suspend_wakeup_supported(void); + +/* Wake-up a CPU */ +void alpine_cpu_wakeup(unsigned int cpu, uintptr_t resume_addr); + +/* Power-off a CPU permanently */ +void alpine_cpu_die(unsigned int cpu); + +/* Kill a CPU */ +int alpine_cpu_kill(unsigned int cpu); + +/* Suspend a CPU temporarily */ +void alpine_cpu_suspend(void); + +#endif /* __ALPINE_MACHINE_H__ */ diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/debug-macro.S b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/debug-macro.S new file mode 100644 index 00000000000000..e886bf463b8752 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/debug-macro.S @@ -0,0 +1,32 @@ +/* + * Early serial output macro for Alpine + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#define DEBUG_ALPINE_SB_BASE 0xfc000000 +#define DEBUG_ALPINE_PBS_OFFSET 0x01880000 +#define DEBUG_ALPINE_UART_OFFSET 0x3000 + + .macro addruart,rp,rv, tmp + mov \rp, #DEBUG_ALPINE_UART_OFFSET + orr \rp, \rp, #DEBUG_ALPINE_PBS_OFFSET + orr \rv, \rp, #DEBUG_ALPINE_SB_BASE @ virtual base + orr \rp, \rp, #DEBUG_ALPINE_SB_BASE @ physical base + .endm + +#define UART_SHIFT 2 +#include + diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/uncompress.h b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/uncompress.h new file mode 100644 index 00000000000000..d75db4df4343eb --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/include/mach/uncompress.h @@ -0,0 +1,59 @@ +/* + * linux/arch/arm/mach-alpine/include/mach/uncompress.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include + +#define get_uart_base() (AL_UART_BASE(0)) + +#define SERIAL_BASE get_uart_base() + +static void putc(const char c) +{ + unsigned char *base = (unsigned char *)SERIAL_BASE; + int i; + + for (i = 0; i < 0x1000; i++) { + if (base[UART_LSR << 2] & UART_LSR_THRE) + break; + barrier(); + } + + base[UART_TX << 2] = c; +} + +static void flush(void) +{ + unsigned char *base = (unsigned char *)SERIAL_BASE; + unsigned char mask; + int i; + + mask = UART_LSR_TEMT | UART_LSR_THRE; + + for (i = 0; i < 0x1000; i++) { + if ((base[UART_LSR << 2] & mask) == mask) + break; + barrier(); + } +} + +/* + * nothing to do + */ +#define arch_decomp_setup() +#define arch_decomp_wdog() diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/irq-alpine-msi.c b/target/linux/alpine/files/arch/arm/mach-alpine/irq-alpine-msi.c new file mode 100644 index 00000000000000..913223a53c1abf --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/irq-alpine-msi.c @@ -0,0 +1,304 @@ +/* + * Annapurna Labs MSIX support services + * + * Copyright (C) 2016, Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Antoine Tenart + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* MSIX message address format: local GIC target */ +#define ALPINE_MSIX_SPI_TARGET_CLUSTER0 BIT(16) + +static u32 al_irq_msi_addr_high; +static u32 al_irq_msi_addr_low; + +struct alpine_msix_data { + spinlock_t msi_map_lock; + phys_addr_t addr; + u32 spi_first; /* The SGI number that MSIs start */ + u32 num_spis; /* The number of SGIs for MSIs */ + unsigned long *msi_map; +}; + +static void alpine_msix_mask_msi_irq(struct irq_data *d) +{ + pci_msi_mask_irq(d); + irq_chip_mask_parent(d); +} + +static void alpine_msix_unmask_msi_irq(struct irq_data *d) +{ + pci_msi_unmask_irq(d); + irq_chip_unmask_parent(d); +} + +static struct irq_chip alpine_msix_irq_chip = { + .name = "MSIx", + .irq_mask = alpine_msix_mask_msi_irq, + .irq_unmask = alpine_msix_unmask_msi_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = irq_chip_set_affinity_parent, +}; + +static int alpine_msix_allocate_sgi(struct alpine_msix_data *priv, int num_req) +{ + int first; + + spin_lock(&priv->msi_map_lock); + + first = bitmap_find_next_zero_area(priv->msi_map, priv->num_spis, 0, + num_req, 0); + if (first >= priv->num_spis) { + spin_unlock(&priv->msi_map_lock); + return -ENOSPC; + } + + bitmap_set(priv->msi_map, first, num_req); + + spin_unlock(&priv->msi_map_lock); + + return priv->spi_first + first; +} + +static void alpine_msix_free_sgi(struct alpine_msix_data *priv, unsigned sgi, + int num_req) +{ + int first = sgi - priv->spi_first; + + spin_lock(&priv->msi_map_lock); + + bitmap_clear(priv->msi_map, first, num_req); + + spin_unlock(&priv->msi_map_lock); +} + +static void alpine_msix_compose_msi_msg(struct irq_data *data, + struct msi_msg *msg) +{ + msg->address_hi = al_irq_msi_addr_high; + msg->address_lo = al_irq_msi_addr_low + (1<<16) + (data->hwirq << 3); + +// msg->address_hi = upper_32_bits(msg_addr); +// 
msg->address_lo = lower_32_bits(msg_addr); + msg->data = 0; +} + +static struct msi_domain_info alpine_msix_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX, + .chip = &alpine_msix_irq_chip, +}; + +static struct irq_chip middle_irq_chip = { + .name = "alpine_msix_middle", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_compose_msi_msg = alpine_msix_compose_msi_msg, +}; + +static int alpine_msix_gic_domain_alloc(struct irq_domain *domain, + unsigned int virq, int sgi) +{ + struct irq_fwspec fwspec; + struct irq_data *d; + int ret; + + if (!is_of_node(domain->parent->fwnode)) + return -EINVAL; + + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 3; + fwspec.param[0] = 0; + fwspec.param[1] = sgi; + fwspec.param[2] = IRQ_TYPE_EDGE_RISING; + + ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); + if (ret) + return ret; + + d = irq_domain_get_irq_data(domain->parent, virq); + d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); + + return 0; +} + +static int alpine_msix_middle_domain_alloc(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct alpine_msix_data *priv = domain->host_data; + int sgi, err, i; + + sgi = alpine_msix_allocate_sgi(priv, nr_irqs); + if (sgi < 0) + return sgi; + + for (i = 0; i < nr_irqs; i++) { + err = alpine_msix_gic_domain_alloc(domain, virq + i, sgi + i); + if (err) + goto err_sgi; + + irq_domain_set_hwirq_and_chip(domain, virq + i, sgi + i, + &middle_irq_chip, priv); + } + + return 0; + +err_sgi: + while (--i >= 0) + irq_domain_free_irqs_parent(domain, virq, i); + alpine_msix_free_sgi(priv, sgi, nr_irqs); + return err; +} + +static void alpine_msix_middle_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct alpine_msix_data *priv = irq_data_get_irq_chip_data(d); + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); + alpine_msix_free_sgi(priv, d->hwirq, nr_irqs); +} + +static const struct irq_domain_ops alpine_msix_middle_domain_ops = { + .alloc = alpine_msix_middle_domain_alloc, + .free = alpine_msix_middle_domain_free, +}; + +static int alpine_msix_init_domains(struct alpine_msix_data *priv, + struct device_node *node) +{ + struct irq_domain *middle_domain, *msi_domain, *gic_domain; + struct device_node *gic_node; + + gic_node = of_irq_find_parent(node); + if (!gic_node) { + printk(KERN_EMERG "Failed to find the GIC node\n"); + return -ENODEV; + } + + gic_domain = irq_find_host(gic_node); + if (!gic_domain) { + printk(KERN_EMERG "Failed to find the GIC domain\n"); + return -ENXIO; + } + + middle_domain = irq_domain_add_tree(NULL, + &alpine_msix_middle_domain_ops, + priv); + if (!middle_domain) { + printk(KERN_EMERG "Failed to create the MSIX middle domain\n"); + return -ENOMEM; + } + middle_domain->parent = gic_domain; + + msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), + &alpine_msix_domain_info, + middle_domain); + if (!msi_domain) { + printk(KERN_EMERG "Failed to create MSI domain\n"); + irq_domain_remove(middle_domain); + return -ENOMEM; + } + return 0; +} + +int al_msix_init(void) +{ + struct alpine_msix_data *priv; + struct resource res; + int ret; + struct device_node *node; + /* TODO: do for primary CPU only - what about sync? 
*/ + node = of_find_compatible_node(NULL, NULL, "annapurna-labs,al-msix"); + BUG_ON(!node); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spin_lock_init(&priv->msi_map_lock); + + ret = of_address_to_resource(node, 0, &res); + if (ret) { + printk(KERN_EMERG "Failed to allocate resource\n"); + goto err_priv; + } + + /* + * The 20 least significant bits of addr provide direct information + * regarding the interrupt destination. + * + * To select the primary GIC as the target GIC, bits [18:17] must be set + * to 0x0. In this case, bit 16 (SPI_TARGET_CLUSTER0) must be set. + */ + + al_irq_msi_addr_high = ((u64)res.start) >> 32; + al_irq_msi_addr_low = res.start & 0xffffffff; + + +// priv->addr = (((u64)res.start) >> 32) << 16; +// priv->addr |= res.start & 0xffffffff; + + priv->addr = res.start & GENMASK_ULL(63,20); + priv->addr |= ALPINE_MSIX_SPI_TARGET_CLUSTER0; + + priv->spi_first = 96; + priv->num_spis = 64; +// if (of_property_read_u32(node, "al,msi-base-spi", &priv->spi_first)) { +// pr_err("Unable to parse MSI base\n"); +// ret = -EINVAL; +// goto err_priv; +// } + +// if (of_property_read_u32(node, "al,msi-num-spis", &priv->num_spis)) { +// pr_err("Unable to parse MSI numbers\n"); +// ret = -EINVAL; +// goto err_priv; +// } + + priv->msi_map = kzalloc(sizeof(*priv->msi_map) * BITS_TO_LONGS(priv->num_spis), + GFP_KERNEL); + if (!priv->msi_map) { + ret = -ENOMEM; + goto err_priv; + } + + printk(KERN_EMERG "Registering %d msixs, starting at %d\n", + priv->num_spis, priv->spi_first); + + ret = alpine_msix_init_domains(priv, node); + if (ret) + goto err_map; + + return 0; + +err_map: + kfree(priv->msi_map); +err_priv: + kfree(priv); + return ret; +} +//IRQCHIP_DECLARE(alpine_msix, "annapurna-labs,al-msix", alpine_msix_init); diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/pcie_of.c b/target/linux/alpine/files/arch/arm/mach-alpine/pcie_of.c new file mode 100644 index 00000000000000..89d0f83d87d7b3 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/pcie_of.c @@ -0,0 +1,664 @@ +/* + * Annapurna Labs PCI host bridge device tree driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* + * - This driver for both internal PCIe bus and for external PCIe ports + * (in Root-Complex mode). + * - The driver requires PCI_DOMAINS as each port registered as a pci domain + * - for the external PCIe ports, the following applies: + * - Configuration access to bus 0 device 0 are routed to the configuration + * space header register that found in the host bridge. + * - The driver assumes the controller link is initialized by the + * bootloader. 
+ */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include "core.h" + +#include "../../drivers/pci/pci.h" + +enum al_pci_type { + AL_PCI_TYPE_INTERNAL = 0, + AL_PCI_TYPE_EXTERNAL = 1, +}; + +/* PCI bridge private data */ +struct al_pcie_pd { + struct device *dev; + enum al_pci_type type; + struct resource ecam; + struct resource mem; + struct resource io; + struct resource realio; + struct resource regs; + struct resource busn; + + void __iomem *ecam_base; + void __iomem *regs_base; + + void __iomem *local_bridge_config_space; + unsigned int index; + /* lock configuration access as we change the target_bus */ + spinlock_t conf_lock; + /*HAL structure*/ + struct al_pcie_port pcie_port; + struct al_pcie_link_status status; + u8 target_bus; +}; + + +static inline struct al_pcie_pd *sys_to_pcie(struct pci_sys_data *sys) +{ + return sys->private_data; +} + +static int al_pcie_enable_controller(struct al_pcie_pd *pcie) +{ + if (pcie->type == AL_PCI_TYPE_INTERNAL) + return 0; + + al_pcie_handle_init(&pcie->pcie_port, pcie->regs_base, pcie->index); + if (al_pcie_function_type_get(&pcie->pcie_port) != AL_PCIE_FUNCTION_MODE_RC) { + dev_err(pcie->dev, "controller is not configured to Root-Complex mode\n"); + return -ENOSYS; + } + + return 0; +} + +static bool al_pcie_port_check_link(struct al_pcie_pd *pcie) +{ + struct al_pcie_link_status *status = &pcie->status; + int rc; + + if (pcie->type == AL_PCI_TYPE_INTERNAL) + return true; + + rc = al_pcie_link_status(&pcie->pcie_port, status); + if (rc < 0) { + dev_err(pcie->dev, "failed to get pcie link status\n"); + return false; + } + if (status->link_up == AL_FALSE) { + dev_info(pcie->dev, "link %u down\n", pcie->index); + return false; + } + dev_info(pcie->dev, "link up: speed Gen %d width x%x\n", + status->speed, status->lanes); + + return true; +} + +/* prepare controller for issueing IO transactions*/ +static int al_pcie_io_prepare(struct al_pcie_pd *pcie) +{ + struct al_pcie_port *pcie_port = &pcie->pcie_port; + if (pcie->type == AL_PCI_TYPE_INTERNAL) { + return 0; + } else { + struct al_pcie_atu_region io_atu_region = { + .enable = AL_TRUE, + .direction = al_pcie_atu_dir_outbound, + .index = 0, + .base_addr = (uint64_t)pcie->io.start, + .limit = (uint64_t)pcie->io.start + resource_size(&pcie->io) - 1, + .target_addr = (uint64_t)pcie->realio.start, /* the address that matches will be translated to this address + offset */ + .invert_matching = AL_FALSE, + .tlp_type = AL_PCIE_TLP_TYPE_IO, /* pcie tlp type*/ + .attr = 0, /* pcie frame header attr field*/ + /* outbound specific params */ + .msg_code = 0, /* pcie message code */ + .cfg_shift_mode = AL_FALSE, + /* inbound specific params*/ + }; + + dev_dbg(pcie->dev, "%s: base %llx, limit %llx, target %llx\n", + __func__, io_atu_region.base_addr, + io_atu_region.limit, io_atu_region.target_addr); + al_pcie_atu_region_set(pcie_port, &io_atu_region); + } + + return 0; +} + +#ifdef CONFIG_AL_PCIE_RMN_1010 +/* prepare controller for issuing mem transactions */ +static int al_pcie_mem_prepare(struct al_pcie_pd *pcie) +{ + struct al_pcie_port *pcie_port = &pcie->pcie_port; + if (pcie->type == AL_PCI_TYPE_INTERNAL) { + return 0; + } else { + struct al_pcie_atu_region mem_atu_region; + + /* + * This region is meant to insure all accesses to this section + * will be always with type memory (accessing from DMA may + * change the type to IO). 
+	 */
+		mem_atu_region.enable = AL_TRUE;
+		mem_atu_region.direction = al_pcie_atu_dir_outbound;
+		mem_atu_region.index = 1;
+		mem_atu_region.base_addr = pcie->mem.start;
+		mem_atu_region.limit = pcie->mem.end;
+		mem_atu_region.target_addr = pcie->mem.start;
+		mem_atu_region.invert_matching = AL_FALSE;
+		mem_atu_region.tlp_type = AL_PCIE_TLP_TYPE_MEM; /* pcie tlp type */
+		mem_atu_region.attr = 0; /* pcie frame header attr field */
+		mem_atu_region.msg_code = 0; /* pcie message code */
+		mem_atu_region.cfg_shift_mode = AL_FALSE;
+		mem_atu_region.bar_number = 0; /* not used */
+		mem_atu_region.match_mode = 0; /* address match mode */
+		mem_atu_region.enable_attr_match_mode = AL_FALSE;
+		mem_atu_region.enable_msg_match_mode = AL_FALSE;
+
+		dev_dbg(pcie->dev, "%s: base %llx, limit %llx, target %llx\n",
+			__func__, mem_atu_region.base_addr,
+			mem_atu_region.limit, mem_atu_region.target_addr);
+
+		al_pcie_atu_region_set(pcie_port, &mem_atu_region);
+	}
+
+	return 0;
+}
+#endif
+
+/* prepare controller for issuing CFG transactions */
+static int al_pcie_cfg_prepare(struct al_pcie_pd *pcie)
+{
+	struct al_pcie_port *pcie_port = &pcie->pcie_port;
+
+	if (pcie->type == AL_PCI_TYPE_INTERNAL)
+		return 0;
+
+	spin_lock_init(&pcie->conf_lock);
+	pcie->target_bus = 1;
+	/*
+	 * force the controller to set the pci bus in the TLP to
+	 * pcie->target_bus no matter what the bus portion of the ECAM
+	 * address is.
+	 */
+	al_pcie_target_bus_set(pcie_port, pcie->target_bus, 0xFF);
+
+	/* the bus connected to the controller is always enumerated as bus 1 */
+	al_pcie_secondary_bus_set(pcie_port, 1);
+	/* set the subordinate bus to the max value */
+	al_pcie_subordinary_bus_set(pcie_port, 0xff);
+
+	return 0;
+}
+
+/* Get ECAM address according to bus, device, function, and offset */
+static void __iomem *al_pcie_cfg_addr(struct al_pcie_pd *pcie,
+				      struct pci_bus *bus,
+				      unsigned int devfn, int offset)
+{
+	void __iomem *ecam_base = pcie->ecam_base;
+	unsigned int busnr = bus->number;
+	int slot = PCI_SLOT(devfn);
+	void __iomem *ret_val;
+
+	/* Trap out illegal values */
+	if (busnr > 255)
+		BUG();
+	if (devfn > 255)
+		BUG();
+
+	ret_val = (ecam_base + ((PCI_SLOT(devfn) << 15) |
+				(PCI_FUNC(devfn) << 12) |
+				offset));
+	if (pcie->type == AL_PCI_TYPE_INTERNAL)
+		return ret_val;
+
+	/* If there is no link, just show the PCI bridge.
*/ + if ((pcie->status.link_up == AL_FALSE) && (busnr > 0 || slot > 0)) + return NULL; + + if (busnr == 0) { + if (slot > 0) + return NULL; + ret_val = pcie->local_bridge_config_space; + ret_val += offset; + } else { + if (busnr != pcie->target_bus) { + dev_dbg(pcie->dev, "change target bus number from %d to %d\n", + pcie->target_bus, busnr); + pcie->target_bus = busnr; + al_pcie_target_bus_set(&pcie->pcie_port, + pcie->target_bus, + 0xFF); + } + } + return ret_val; +} + +/* PCI config space read */ +static int al_read_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *val) +{ + struct al_pcie_pd *pcie = sys_to_pcie(bus->sysdata); + int rc = PCIBIOS_SUCCESSFUL; + unsigned long flags; + void __iomem *addr; + u32 v = 0xffffffff; + + dev_dbg(pcie->dev, "read_config from %d size %d dev (domain %d) %d:%d:%d\n", + where, size, pcie->index, + bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); + + spin_lock_irqsave(&pcie->conf_lock, flags); + + addr = al_pcie_cfg_addr(pcie, bus, devfn, where); + dev_dbg(pcie->dev, " read address %p\n", addr); + + if (addr) { + switch (size) { + case 1: + v = readb(addr); + break; + case 2: + v = readw(addr); + break; + case 4: + v = readl(addr); + break; + default: + rc = PCIBIOS_BAD_REGISTER_NUMBER; + } + } else { + rc = PCIBIOS_DEVICE_NOT_FOUND; + } + + spin_unlock_irqrestore(&pcie->conf_lock, flags); + *val = v; + pr_debug("read_config_byte read %#x\n", *val); + return rc; +} + +/* PCI config space write */ +static int al_write_config(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 val) +{ + struct al_pcie_pd *pcie = sys_to_pcie(bus->sysdata); + int rc = PCIBIOS_SUCCESSFUL; + unsigned long flags; + void __iomem *addr; + + dev_dbg(pcie->dev, "write_config_byte %#x to %d size %d dev (domain %d) %d:%d:%d\n", val, + where, size, pcie->index, + bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); + + spin_lock_irqsave(&pcie->conf_lock, flags); + + addr = al_pcie_cfg_addr(pcie, bus, devfn, where); + dev_dbg(pcie->dev, " write address %p\n", addr); + if (addr) { + switch (size) { + case 1: + writeb((u8)val, addr); + break; + + case 2: + writew((u16)val, addr); + break; + + case 4: + writel(val, addr); + break; + default: + rc = PCIBIOS_BAD_REGISTER_NUMBER; + } + } else { + rc = PCIBIOS_DEVICE_NOT_FOUND; + } + spin_unlock_irqrestore(&pcie->conf_lock, flags); + return rc; +} + +/* PCI bridge config space read/write operations */ +static struct pci_ops al_pcie_ops = { + .read = al_read_config, + .write = al_write_config, +}; + +/* PCI config space read */ +static int al_internal_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct al_pcie_pd *pcie = sys_to_pcie(bus->sysdata); + void __iomem *addr = al_pcie_cfg_addr(pcie, bus, devfn, where & ~3); + u32 v; + +// printk(KERN_EMERG "read_config from %d size %d dev %d:%d:%d\n", where, size, +// bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); + + switch (size) { + case 1: + v = readl(addr); + v = (v >> ((where&0x3)*8)) & 0xff; + break; + case 2: + v = readl(addr); + v = (v >> ((where&0x3)*8)) & 0xffff; + break; + default: + v = readl(addr); + break; + } + + *val = v; + pr_debug("read_config_byte read %#x\n", *val); + return PCIBIOS_SUCCESSFUL; +} + +/* PCI config space write */ +static int al_internal_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct al_pcie_pd *pcie = sys_to_pcie(bus->sysdata); + void __iomem *addr = al_pcie_cfg_addr(pcie, bus, devfn, where); + + pr_debug("write_config %#x to %d 
size %d dev %d:%d:%d\n", val, + where, size, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); + + switch (size) { + case 1: + writeb((u8)val, addr); + break; + case 2: + writew((u16)val, addr); + break; + case 4: + writel(val, addr); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +/* PCI bridge config space read/write operations */ +static struct pci_ops al_internal_pcie_ops = { + .read = al_internal_read_config, + .write = al_internal_write_config, +}; + +static int al_pcie_setup(int nr, struct pci_sys_data *sys) +{ + struct al_pcie_pd *pcie = sys_to_pcie(sys); + + if (pcie->type == AL_PCI_TYPE_EXTERNAL) + pci_add_resource_offset(&sys->resources, + &pcie->realio, + sys->io_offset); + + pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); + pci_add_resource(&sys->resources, &pcie->busn); + + return 1; +} + +static int al_pcie_parse_dt(struct al_pcie_pd *pcie) +{ + struct device_node *np = pcie->dev->of_node; + struct of_pci_range_iter iter; + int err; + static int index; + + if (pcie->type == AL_PCI_TYPE_EXTERNAL) { + /* Get registers resources */ + err = of_address_to_resource(np, 0, &pcie->regs); + if (err < 0) { + dev_dbg(pcie->dev, "of_address_to_resource(): %d\n", + err); + return err; + } + dev_dbg(pcie->dev, " regs %pR\n", &pcie->regs); + pcie->regs_base = devm_ioremap_resource(pcie->dev, + &pcie->regs); + if (!pcie->regs_base) + return -EADDRNOTAVAIL; + /* set the base address of the configuration space of the local + * bridge + */ + pcie->local_bridge_config_space = pcie->regs_base + 0x2000; + } + /* Get the ECAM, I/O and memory ranges from DT */ + for_each_of_pci_range_compat(&iter, np) { + unsigned long restype = iter.flags & IORESOURCE_TYPE_BITS; + if (restype == 0) { + range_iter_fill_resource(iter, np, &pcie->ecam); + pcie->ecam.flags = IORESOURCE_MEM; + pcie->ecam.name = "ECAM"; + } + if (restype == IORESOURCE_IO) { + range_iter_fill_resource(iter, np, &pcie->io); + range_iter_fill_resource(iter, np, &pcie->realio); + pcie->realio.start = iter.pci_addr; + pcie->realio.end = iter.pci_addr + iter.size - 1; + pcie->io.name = "I/O"; + + pci_remap_iospace(&pcie->realio, iter.cpu_addr); + } + if (restype == IORESOURCE_MEM) { + range_iter_fill_resource(iter, np, &pcie->mem); + pcie->mem.name = "MEM"; + } + } + + /* map ecam space */ + dev_dbg(pcie->dev, " ecam %pr\n", &pcie->ecam); + pcie->ecam_base = devm_ioremap_resource(pcie->dev, &pcie->ecam); + if (!pcie->ecam_base) + return -EADDRNOTAVAIL; + + err = of_pci_parse_bus_range(np, &pcie->busn); + if (err < 0) { + dev_err(pcie->dev, "failed to parse ranges property: %d\n", + err); + pcie->busn.name = np->name; + pcie->busn.start = 1; + pcie->busn.end = 0xff; + pcie->busn.flags = IORESOURCE_BUS; + } + pcie->index = index++; + return 0; +} + +/* map the specified device/slot/pin to an IRQ */ +static int al_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return of_irq_parse_and_map_pci(dev, slot, pin); +} + +static int al_pcie_scan_bus(int nr, struct pci_host_bridge *bridge) +{ + struct pci_sys_data *sys = pci_host_bridge_priv(bridge); + struct al_pcie_pd *pcie = sys_to_pcie(sys); + + list_splice_init(&sys->resources, &bridge->windows); + bridge->dev.parent = pcie->dev; + bridge->sysdata = sys; + bridge->busnr = sys->busnr; + + if (pcie->type == AL_PCI_TYPE_INTERNAL) + bridge->ops = &al_internal_pcie_ops; + else + bridge->ops = &al_pcie_ops; + + return pci_scan_root_bus_bridge(bridge); +} + + +/* + * Fixup function to make sure Max Paylod Size and MaxReadReq + * are set based on host bridge Max 
capabilities. + */ + +extern int pcie_bus_configure_set(struct pci_dev *dev, void *data); +static void al_pci_fixup(struct pci_dev *dev) +{ + u8 smpss = 0; + pcie_bus_configure_set(dev, &smpss); +} +DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, al_pci_fixup); + + + +static int al_pcie_add_host_bridge(struct al_pcie_pd *pcie) +{ + struct hw_pci hw; + + memset(&hw, 0, sizeof(hw)); + + hw.nr_controllers = 1; +// hw.domain = pcie->index; + hw.private_data = (void **)&pcie; + hw.setup = al_pcie_setup; + hw.scan = al_pcie_scan_bus; + hw.map_irq = al_pcie_map_irq; + + pci_common_init(&hw); + + return 0; +} + +static const struct of_device_id al_pcie_of_match[] = { + { .compatible = "annapurna-labs,al-internal-pcie", .data = (void *)AL_PCI_TYPE_INTERNAL }, + { .compatible = "annapurna-labs,al-pci", .data = (void *)AL_PCI_TYPE_EXTERNAL }, + { }, +}; + +extern uint64_t al_pcie_read_addr_start[AL_SB_PCIE_NUM]; +extern uint64_t al_pcie_read_addr_end[AL_SB_PCIE_NUM]; +extern uint64_t al_pcie_write_addr_start[AL_SB_PCIE_NUM]; +extern uint64_t al_pcie_write_addr_end[AL_SB_PCIE_NUM]; +extern bool al_pcie_address_valid[AL_SB_PCIE_NUM]; + +static int al_pcie_probe(struct platform_device *pdev) +{ + enum al_pci_type type; + const struct of_device_id *of_id; + struct al_pcie_pd *pcie; + int err; + + of_id = of_match_device(al_pcie_of_match, &pdev->dev); + if (of_id) + type = (enum al_pci_type) of_id->data; + else { + return -1; + } + + pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->type = type; + pcie->dev = &pdev->dev; + + err = al_pcie_parse_dt(pcie); + if (err < 0) + return err; + + err = al_pcie_enable_controller(pcie); + if (err) + goto err; + + al_pcie_port_check_link(pcie); + + al_pcie_cfg_prepare(pcie); + + al_pcie_io_prepare(pcie); + +#ifdef CONFIG_AL_PCIE_RMN_1010 + al_pcie_mem_prepare(pcie); + if (pcie->type != AL_PCI_TYPE_INTERNAL) { + + al_pcie_read_addr_start[pcie->index] = + min(pcie->mem.start, + pcie->io.start); + al_pcie_read_addr_end[pcie->index] = + max(pcie->mem.end, + pcie->io.end); + + al_pcie_write_addr_start[pcie->index] = pcie->io.start; + al_pcie_write_addr_end[pcie->index] = pcie->io.end; + + al_pcie_address_valid[pcie->index] = true; + + dev_info(&pdev->dev, "%s: [pcie %d] use DMA for read from %llx to %llx\n", + __func__, pcie->index, al_pcie_read_addr_start[pcie->index], + al_pcie_read_addr_end[pcie->index]); + + dev_info(&pdev->dev, "%s: [pcie %d] use DMA for write from %llx to %llx\n", + __func__, pcie->index, al_pcie_write_addr_start[pcie->index], + al_pcie_write_addr_end[pcie->index]); + + /* + * set an axi IO bar to make the accesses to this addresses + * with size of 4 bytes. 
(access from DMA will be 16 Bytes minimum) + */ + al_pcie_axi_io_config( + &pcie->pcie_port, + al_pcie_read_addr_start[pcie->index], + al_pcie_read_addr_end[pcie->index]); + } +#endif + + /* Configure IOCC for external PCIE */ + if (pcie->type != AL_PCI_TYPE_INTERNAL) { + if (pdev->dev.archdata.hwcc) { + printk("Configuring PCIE for IOCC\n"); + al_pcie_port_snoop_config(&pcie->pcie_port, 1); + } + } +udelay(1000); + err = al_pcie_add_host_bridge(pcie); + if (err < 0) { + dev_err(&pdev->dev, "failed to enable PCIe controller: %d\n", + err); + goto enable_err; + } + + platform_set_drvdata(pdev, pcie); + return 0; +enable_err: +err: + return err; +} + +static struct platform_driver al_pcie_driver = { + .driver = { + .name = "al-pcie", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(al_pcie_of_match), + }, + .probe = al_pcie_probe, +}; +module_platform_driver(al_pcie_driver); diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/platsmp.c b/target/linux/alpine/files/arch/arm/mach-alpine/platsmp.c new file mode 100644 index 00000000000000..1f9eada2631afa --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/platsmp.c @@ -0,0 +1,173 @@ +/* + * linux/arch/arm/mach-alpine/platsmp.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +/* XXX alpine_pen_release is cargo culted code - DO NOT COPY XXX */ +volatile int alpine_pen_release = -1; + +extern void secondary_startup(void); + +static void ca15x4_init_cpu_map(void) +{ + unsigned int i, ncores; + + asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (ncores)); + ncores = ((ncores >> 24) & 3) + 1; + + for (i = 0; i < ncores; i++) + set_cpu_possible(i, true); +} + +static void ca15x4_smp_enable(unsigned int max_cpus) +{ + int i; + + for (i = 0; i < max_cpus; i++) + set_cpu_present(i, true); +} + + +/* + * Write pen_release in a way that is guaranteed to be visible to all + * observers, irrespective of whether they're taking part in coherency + * or not. This is necessary for the hotplug code to work reliably. + */ +static void write_pen_release(int val) +{ + alpine_pen_release = val; + smp_wmb(); + __cpuc_flush_dcache_area((void *)&alpine_pen_release, sizeof(alpine_pen_release)); + outer_clean_range(__pa(&alpine_pen_release), __pa(&alpine_pen_release + 1)); +} + +static DEFINE_SPINLOCK(boot_lock); + +void platform_secondary_init(unsigned int cpu) +{ + /* + * let the primary processor know we're out of the + * pen, then head off into the C entry point + */ + write_pen_release(-1); + + /* + * Synchronise with the boot thread. 
+ */ + spin_lock(&boot_lock); + spin_unlock(&boot_lock); +} + +int al_boot_secondary(unsigned int cpu + , struct task_struct *idle) +{ + unsigned long timeout; + + /* Check CPU resume regs validity */ + if (!alpine_cpu_suspend_wakeup_supported()) { + WARN(1, "%s: wakeup not supported!\n", __func__); + return -ENOSYS; + } + + /* + * Set synchronisation state between this boot processor + * and the secondary one + */ + spin_lock(&boot_lock); + + /* + * This is really belt and braces; we hold unintended secondary + * CPUs in the holding pen until we're ready for them. However, + * since we haven't sent them a soft interrupt, they shouldn't + * be there. + */ + write_pen_release(cpu); + + /* Wake-up secondary CPU */ + alpine_cpu_wakeup(cpu, virt_to_phys(secondary_startup)); + + /* + * Send the secondary CPU a soft interrupt, thereby causing + * the boot monitor to read the system wide flags register, + * and branch to the address found there. + */ + arch_send_wakeup_ipi_mask(cpumask_of(cpu)); + + timeout = jiffies + (1 * HZ); + while (time_before(jiffies, timeout)) { + smp_rmb(); + if (alpine_pen_release == -1) + break; + + udelay(10); + } + + /* + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ + spin_unlock(&boot_lock); + + return alpine_pen_release != -1 ? -ENOSYS : 0; +} + +/* + * Initialise the CPU possible map early - this describes the CPUs + * which may be present or become present in the system. + */ +void __init al_smp_init_cpus(void) +{ + ca15x4_init_cpu_map(); +} + +void __init platform_smp_prepare_cpus(unsigned int max_cpus) +{ + alpine_cpu_pm_init(); + + /* + * Initialise the present map, which describes the set of CPUs + * actually populated at the present time. + */ + ca15x4_smp_enable(max_cpus); +} + +extern int alpine_suspend_finish(unsigned long); + +struct smp_operations __initdata al_smp_ops = { + .smp_init_cpus = al_smp_init_cpus, + .smp_prepare_cpus = platform_smp_prepare_cpus, + .smp_secondary_init = platform_secondary_init, + .smp_boot_secondary = al_boot_secondary, +#ifdef CONFIG_HOTPLUG_CPU + .cpu_die = alpine_cpu_die, + .cpu_kill = alpine_cpu_kill, +#endif +}; diff --git a/target/linux/alpine/files/arch/arm/mach-alpine/sleep-alpine.S b/target/linux/alpine/files/arch/arm/mach-alpine/sleep-alpine.S new file mode 100644 index 00000000000000..ae799c17905188 --- /dev/null +++ b/target/linux/alpine/files/arch/arm/mach-alpine/sleep-alpine.S @@ -0,0 +1,45 @@ +/* + * Annapurna labs cpu-idle handler. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include + +ENTRY(alpine_suspend_finish) + mrc p15, 0, r0, c1, c0, 0 + bic r0, r0, #(1 << 2) @ Disable the C bit + mcr p15, 0, r0, c1, c0, 0 + mov r8, lr @backup lr + bl v7_flush_dcache_louis + mov lr, r8 + mrc p15, 0, r0, c1, c0, 1 + bic r0, r0, #(1 << 6) @ Disable SMP bit + mcr p15, 0, r0, c1, c0, 1 + isb + dsb + @This should shutdown + wfi + @We didn't shutdown. probably - we have a pending interrupt. + mrc p15, 0, r0, c1, c0, 0 + orr r0, r0, #(1 << 2) @ Enable the C bit + mcr p15, 0, r0, c1, c0, 0 + mrc p15, 0, r0, c1, c0, 1 + orr r0, r0, #(1 << 6) @ Enable SMP bit + mcr p15, 0, r0, c1, c0, 1 + isb + dsb + mov pc, lr +ENDPROC(alpine_suspend_finish) diff --git a/target/linux/alpine/files/drivers/cpuidle/cpuidle-alpine.c b/target/linux/alpine/files/drivers/cpuidle/cpuidle-alpine.c new file mode 100644 index 00000000000000..c6d98494d770d9 --- /dev/null +++ b/target/linux/alpine/files/drivers/cpuidle/cpuidle-alpine.c @@ -0,0 +1,97 @@ +/* + * Annapurna labs cpu-idle handler. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "cpuidle.h" + +int alpine_cpu_suspend_wakeup_supported(void); +void alpine_cpu_suspend(void); + +static int alpine_enter_lowpower(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index); + +static struct cpuidle_driver alpine_idle_driver = { + .name = "alpine_idle", + .owner = THIS_MODULE, + .states[0] = ARM_CPUIDLE_WFI_STATE_PWR(250), + .states[1] = { + .enter = alpine_enter_lowpower, + .exit_latency = 10, + .power_usage = 125, + .target_residency = 1000, + .flags = 0, + .name = "C1", + .desc = "ARM power down", + }, + .state_count = 2, +}; + +static DEFINE_PER_CPU(struct cpuidle_device, alpine_cpuidle_device); + +static int alpine_enter_lowpower(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) +{ + alpine_cpu_suspend(); + return index; +} + +static int __init alpine_init_cpuidle(void) +{ + int cpu_id; + struct cpuidle_device *device; + struct cpuidle_driver *drv = &alpine_idle_driver; + + if (cpuidle_disabled()) + return -ENOENT; + + if (!(alpine_cpu_suspend_wakeup_supported())) { + pr_err("Annapurna Labs CPUidle components not found\n"); + return -ENOENT; + } + + /* Setup cpuidle driver */ + drv->safe_state_index = 0; + cpuidle_register_driver(&alpine_idle_driver); + + for_each_cpu(cpu_id, cpu_online_mask) { + device = &per_cpu(alpine_cpuidle_device, cpu_id); + device->cpu = cpu_id; +// device->state_count = alpine_idle_driver.state_count; + if (cpuidle_register_device(device)) { + pr_err("CPUidle device registration 
failed\n");
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+device_initcall(alpine_init_cpuidle);
diff --git a/target/linux/alpine/files/drivers/crypto/al/Kconfig b/target/linux/alpine/files/drivers/crypto/al/Kconfig
new file mode 100644
index 00000000000000..6161eb4addaebf
--- /dev/null
+++ b/target/linux/alpine/files/drivers/crypto/al/Kconfig
@@ -0,0 +1,26 @@
+config CRYPTO_DEV_AL_CRYPTO
+	tristate "Support for Annapurna Labs Crypto engine"
+	depends on ARCH_ALPINE
+	select CRYPTO_AES
+	select CRYPTO_DES
+	select CRYPTO_MD5
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	select CRYPTO_ALGAPI
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_CRC32C
+	select SG_SPLIT
+	help
+	  Enable support for the Annapurna Labs Crypto acceleration engine.
+	  Currently AES and SHA256 are supported.
+
+config CRYPTO_DEV_AL_CRYPTO_STATS
+	bool "Annapurna Labs Crypto statistics enabled"
+	depends on CRYPTO_DEV_AL_CRYPTO
+	help
+	  Enable Annapurna Labs Crypto acceleration engine statistics.
+
+config CRYPTO_DEV_AL_AHASH_CRC
+	def_bool CRYPTO_DEV_AL_CRYPTO
diff --git a/target/linux/alpine/files/drivers/crypto/al/Makefile b/target/linux/alpine/files/drivers/crypto/al/Makefile
new file mode 100644
index 00000000000000..25ff5d5d25b43c
--- /dev/null
+++ b/target/linux/alpine/files/drivers/crypto/al/Makefile
@@ -0,0 +1,14 @@
+ccflags-y := -I$(srctree)/arch/arm/mach-alpine/include
+
+obj-$(CONFIG_CRYPTO_DEV_AL_CRYPTO) += al_crypto.o
+
+al_crypto-objs := al_crypto_main.o
+al_crypto-objs += al_crypto_core.o
+al_crypto-objs += al_crypto_skcipher.o
+al_crypto-objs += al_crypto_aead.o
+al_crypto-objs += al_crypto_hash.o
+al_crypto-objs += al_crypto_crc.o
+al_crypto-objs += al_hal_ssm_crypto.o
+al_crypto-objs += al_hal_ssm_crc_memcpy.o
+al_crypto-objs += al_crypto_sysfs.o
+al_crypto-objs += al_crypto_module_params.o
diff --git a/target/linux/alpine/files/drivers/crypto/al/README b/target/linux/alpine/files/drivers/crypto/al/README
new file mode 100644
index 00000000000000..f103514cced402
--- /dev/null
+++ b/target/linux/alpine/files/drivers/crypto/al/README
@@ -0,0 +1,333 @@
+Linux driver for Annapurna Labs Cryptographic Accelerator (Crypto)
+
+Architecture:
+=============
+
+This driver implements standard Linux Crypto API algorithms:
+ablkcipher - aes-cbc, aes-ecb, aes-ctr, des-cbc, des-ecb,
+des3-ede-cbc, des3-ede-ecb.
+aead - aes-cbc+sha256
+ahash - sha256 (with and without hmac), crc32c
+
+The Crypto device is implemented as an integrated PCI-E end point, hence the
+driver uses the PCI interface for probing the device and other management
+functions.
+
+The driver communicates with the hardware using the Annapurna Labs
+Cryptographic Acceleration Engine and GDMA HAL drivers.
+
+Internal Data Structures:
+=========================
+
+al_crypto_device:
+-----------------
+ This structure holds all the information needed to operate the adapter.
+ Fields:
+ - pdev: pointer to Linux PCI device structure
+ - crypto_dma_params: data structure used to pass various parameters to the HAL
+ - gdma_regs_base: GDMA registers base address
+ - hal_crypto: the HAL structure used by HAL to manage the adapter
+ - msix_entries: pointer to linux data structure used to communicate with the
+ kernel which entries to use for msix, and which irqs the kernel assigned
+ for those interrupts.
+ - irq_tbl: array of al_eth_irq, each interrupt used by the driver has an
+ entry in this array.
+ - channels: an array of channel information
+ - max_channels: the maximum number of channels to use
+ - crc_channels: channels reserved for crc
+ - channels_kset: kset used to store channel kobjs, used for sysfs
+ - cleanup_task: cleanup task structure, used during completion interrupts
+ - int_moderation: when not 0, interrupt frequency (in counter ticks)
+ - cache: kmem cache for allocating ring entries
+ - tfm_count: encryption, hash and combined active tfm count
+ - crc_tfm_count: crc active tfm count
+ - alg_list: list of registered encryption and combined algorithms
+ - hash_list: list of registered hash algorithms
+ - crc_list: list of registered crc algorithms
+
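The algorithms listed under Architecture above are reached through the generic
kernel Crypto API, not through these internal structures. A minimal sketch of
how a consumer would get at them (nothing here is driver-specific; "cbc(aes)"
is the generic algorithm name, and the crypto core picks whichever registered
implementation has the highest priority):

	#include <crypto/skcipher.h>
	#include <linux/err.h>

	static int try_alloc_aes_cbc(void)
	{
		struct crypto_skcipher *tfm;

		/* Resolves to this driver when its priority wins */
		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		pr_info("cbc(aes) provided by %s\n",
			crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)));

		crypto_free_skcipher(tfm);
		return 0;
	}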
+al_crypto_chan:
+---------------
+ This structure is used for saving the context of a single channel (queue).
+ Fields:
+ - hal_crypto: the HAL structure used by HAL to manage the adapter
+ - idx: channel index
+ - type: channel type - encryption/hash/combined or crc/checksum
+ - tx_descs_num: number of descriptors in TX queue
+ - tx_dma_desc_virt: TX descriptor ring
+ - tx_dma_desc: TX descriptor ring physical base address
+ - rx_descs_num: number of descriptors in RX queue
+ - rx_dma_desc_virt: RX descriptor ring
+ - rx_dma_desc: RX descriptor ring physical base address
+ - rx_dma_cdesc_virt: RX completion descriptors ring
+ - rx_dma_cdesc: RX completion descriptors ring physical address
+ - alloc_order: channel allocation order (log2 of the size)
+ - sw_ring: SW descriptor ring
+ - stats_gen: general channel statistics
+ - stats_gen_lock: general channel statistics lock
+ - prep_lock: channel transaction preparation lock
+ - head: SW ring head
+ - sw_desc_num_locked: number of SW descriptors locked
+ - tx_desc_produced: number of tx descriptors produced and not issued
+ - sw_queue: backlog queue, used to queue one last entry when HW queue is full
+ - stats_prep: preparation statistics
+ - cache_entries_num: max number of LRU cache entries used by the channel
+ - cache_lru_list: entries list used for LRU, represents current state
+ - cache_lru_count: number of entries in LRU list
+ - cache_lru_entries: LRU entries
+ - cleanup_lock: operation completion cleanup lock
+ - tail: SW ring tail
+ - stats_comp: operation completion statistics
+ - device: the parent device
+ - cleanup_task: operation completion cleanup tasklet
+ - kobj: sysfs kobj
+
+al_crypto_ctx:
+--------------
+ This structure is used for saving a context of a single
+ encryption/hash/combined tfm (SA).
+ Fields:
+ - chan: channel that is used for processing the tfm
+ - cache_state: cache state of current tfm
+ - sa: the HAL structure that represents the context of current tfm
+ - hw_sa: HW representation of sa structure
+ - hw_sa_dma_addr: DMA address of hw_sa
+ - sw_hash: sw hash tfm, used for hmac
+ - iv: IV used for the tfm
+ - iv_dma_addr: DMA address of the IV used for the tfm
+
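The hw_sa/hw_sa_dma_addr pair follows the usual pattern of keeping a
device-visible copy of a driver structure in DMA-coherent memory. A minimal
sketch of how such a pair is typically produced (the struct layout and helper
name are illustrative; only the DMA API call is the real kernel interface):

	#include <linux/dma-mapping.h>

	/* Illustrative stand-in for the driver's HW SA layout */
	struct hw_sa_shadow {
		u8 raw[64];
	};

	/* The engine fetches the SA by DMA, so it must live in coherent
	 * memory; 'dma_addr' is what would land in hw_sa_dma_addr. */
	static struct hw_sa_shadow *alloc_hw_sa(struct device *dev,
						dma_addr_t *dma_addr)
	{
		return dma_alloc_coherent(dev, sizeof(struct hw_sa_shadow),
					  dma_addr, GFP_KERNEL);
	}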
+ Fields: + - buf_0: first buffer used for keeping the data that was not hashed during + current update + - buflen_0: length of buf_0 + - buf_1: second buffer used for keeping the data that was not hashed during + current update + - buflen_1: length of buf_1 + - current_buf: active buffer for keeping the data that was not hashed during + current update + - buf_dma_addr: DMA address of current_buf + - buf_dma_len: length of current_buf + - interm: intermediate state stored between updates + - interm_dma_addr: DMA address of interm + - first: the request is the first request after init + - last: the request is the last request (final or finup) + - hashed_len: length of data that was hashed + +al_crc_ctx: +----------- + This structure is used for saving a context of a single + crc/checksum tfm (SA). + Fields: + - chan: channel that is used for processing the tfm + - crcsum_type: crc/checksum algorithm type + - key: initial key + +al_crc_req_ctx: +--------------- + This structure is used for saving a context of a single crc/checksum request. + Fields: + - last: the request is the last request (final or finup) + - cache_state: cache state of current tfm + - crc_dma_addr: DMA address for crc result + +al_crypto_chan_stats_gen: +------------------------- + DMA channel statistics - general + Fields: + - skcipher_tfms: active skcipher tfms + - aead_tfms: active aead tfms + - ahash_tfms: active ahash (hash algorithms) tfms + - crc_tfms: active crc/checksum tfms + +al_crypto_chan_stats_prep: +-------------------------- + DMA channel statistics - preparation + Fields: + - skcipher_encrypt_reqs: skcipher encrypt requests counter + - skcipher_encrypt_bytes: skcipher encrypted bytes counter + - skcipher_decrypt_reqs: skcipher decrypt requests counter + - skcipher_decrypt_bytes: skcipher decrypted bytes counter + - aead_encrypt_hash_reqs: aead (combined) encrypt+hash requests counter + - aead_encrypt_bytes: aead (combined) encrypted bytes counter + - aead_hash_bytes: aead (combined) hashed bytes counter + - aead_decrypt_validate_reqs: aead (combined) decrypt+validate requests + counter + - aead_decrypt_bytes: aead (combined) decrypted bytes counter + - aead_validate_bytes: aead (combined) validated bytes counter + - ahash_reqs: ahash (hash algorithms) requests counter + - ahash_bytes: ahash (hash algorithms) hashed bytes counter + - crc_reqs: crc/checksum requests counter + - crc_bytes: crc/checksum bytes counter + - cache_misses: SA cache misses + - skcipher_reqs_le512: skcipher requests <= 512 bytes counter + - skcipher_reqs_512_2048: skcipher requests >512 && <=2048 bytes counter + - skcipher_reqs_2048_4096: skcipher requests >2048 && <=4096 bytes counter + - skcipher_reqs_gt4096: skcipher requests >4096 bytes counter + - aead_reqs_le512: aead requests <= 512 bytes counter + - aead_reqs_512_2048: aead requests >512 && <=2048 bytes counter + - aead_reqs_2048_4096: aead requests >2048 && <=4096 bytes counter + - aead_reqs_gt4096: aead requests >4096 bytes counter + - ahash_reqs_le512: ahash (hash algorithms) requests <= 512 bytes counter + - ahash_reqs_512_2048: ahash (hash algorithms) requests >512 && <=2048 bytes + counter + - ahash_reqs_2048_4096: ahash (hash algorithms) requests >2048 && <=4096 bytes + counter + - ahash_reqs_gt4096: ahash (hash algorithms) requests >4096 bytes counter + - crc_reqs_le512: crc/checksum requests <= 512 bytes counter + - crc_reqs_512_2048: crc/checksum requests >512 && <=2048 bytes counter + - crc_reqs_2048_4096: crc/checksum requests >2048 && <=4096 bytes counter + - crc_reqs_gt4096: crc/checksum requests >4096 bytes counter + +al_crypto_chan_stats_comp: +-------------------------- + DMA channel statistics - completion + Fields: + - redundant_int_cnt: Total number of redundant interrupts (interrupts for + which there were no completions) + - max_active_descs: Maximum number of descriptors that were active + simultaneously + +Interrupts mode: +================ +The Annapurna Labs Crypto Acceleration Engine supports the TrueMultiCore(TM) +technology and is based on the Annapurna Labs Unified DMA (aka GDMA), thus it has +an interrupt controller that can generate a legacy level sensitive interrupt, +or alternatively, an MSI-X interrupt for each cause bit. + +The driver first tries to work in per-queue MSI-X mode for optimal performance, +with an MSI-X interrupt for each channel. +If it fails to enable per-queue MSI-X mode, it tries to use a single MSI-X +interrupt for all the events. If that fails too, it falls back to a single legacy +level sensitive interrupt wire for all the events. + +The system's interrupt status can be viewed in /proc/interrupts. +When legacy mode is used, the registered interrupt name will be: +al-crypto-intx-all@pci: +When single MSI-X interrupt mode is used, the registered interrupt name will be: +al-crypto-msix-all@pci: +and when per-queue MSI-X mode is used, for each channel an interrupt will be +registered with the following name: +al-crypto-comp-. + +Memory allocations: +=================== +Cache coherent buffers for the following DMA rings: +- TX submission ring +- RX submission ring +- RX completion ring +kmem cache buffers for the SW rings. +All these buffers are allocated upon channel creation and freed upon channel +destruction. + +MULTIQUEUE: +=========== +As part of the TrueMultiCore(TM) technology, the driver supports multiqueue mode. +This mode has various benefits when channels are allocated to different CPU +cores/threads: +1. Reduced CPU/thread/process contention on a given channel +2. Reduced cache miss rate on transaction completion +3. In-hardware interrupt redirection + +Currently every tfm is assigned to a queue in a round-robin manner. +Thus, transactions related to a certain tfm will always be serviced using the +same queue. + +crc/checksum algorithms have to use different queue(s) than +encryption/hash/combined algorithms. crc_channels is used to define the number +of channels used for crc/checksum. The queue per tfm assignment described above +is implemented separately for encryption/hash/combined queues and crc queues. + +Locks and atomic variables: +=========================== + +The following locks and atomic variables are used in the driver: +- Atomic counters for ring allocation per tfm (one for encryption/hash/combined + and one for crc) +- Prep lock for locking the sw ring (al_crypto_chan->prep_lock), + used also to protect cache management info +- Cleanup lock for the completion ring in each channel + (al_crypto_chan->cleanup_lock) + +SR-IOV +====== +- The driver supports the SR-IOV capability. + The PF enables SR-IOV depending on the module param use_virtual_function. +- By default SR-IOV is enabled; in this mode: + The 4 queues of the VF are dedicated to CRC. + The 4 queues of the PF are dedicated to Crypto/Hash. + +SA and IV caches: +================= + +Encryption/hash/combined +------------------------ +- An SA cache of 16 entries is available in the Crypto Accelerator HW. +- The cache is equally divided between active channels used for + encryption/hash/combined algorithms.
+- The cache for every channel is managed using LRU. +- We don’t use the data stored by the Crypto HW inside the SA entry. + Thus, if the entry inside the cache has to be replaced, there’s no need to + fetch the cached entry. + +CRC/checksum +------------ +- CRC/checksum algorithms don't use SAs or the SA cache; there is a separate + CRC IV cache for those algorithms. +- A CRC IV cache of 8 entries is available in the Crypto HW. +- The cache is equally divided between active channels used for crc/checksum + algorithms. +- The cache for every channel is managed using LRU. +- A cached result stored in the IV cache is used, if available. + +TODO list +========= +- Add support for the rest of the algorithms supported by the Annapurna Labs + Cryptographic Acceleration Engine. +- Add other SA cache management algorithms besides LRU. +- insmod/rmmod support. + +File structure +============== + +Module init and PCI registration +-------------------------------- +./al_crypto_main.c + +Driver core +----------- +./al_crypto_core.c +./al_crypto.h + +crypto driver module params +--------------------------- +./al_crypto_module_params.h +./al_crypto_module_params.c + +skcipher and aead algorithms +---------------------------- +./al_crypto_skcipher.c +./al_crypto_aead.c + +hash algorithms +--------------- +./al_crypto_hash.c + +crc/checksum algorithms +----------------------- +./al_crypto_crc.c + +/sys FS registration +-------------------- +./al_crypto_sysfs.c +./al_crypto_sysfs.h + +Hardware abstraction layer +-------------------------- +./al_hal_ssm_crypto.c +./al_hal_ssm_crc_memcpy.c +./al_hal_ssm_crypto.h + +Misc +---- +./README +./Makefile diff --git a/target/linux/alpine/files/drivers/crypto/al/al_crypto.h b/target/linux/alpine/files/drivers/crypto/al/al_crypto.h new file mode 100644 index 00000000000000..f70c2b2ff2feb2 --- /dev/null +++ b/target/linux/alpine/files/drivers/crypto/al/al_crypto.h @@ -0,0 +1,520 @@ +/* + * drivers/crypto/al_crypto.h + * + * Annapurna Labs Crypto driver - header file + * + * Copyright (C) 2012 Annapurna Labs Ltd. + * + * Chained scatter/gather lists handling based on caam driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef __AL_CRYPTO_H__ +#define __AL_CRYPTO_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define AL_CRYPTO_VERSION "0.01" + +#ifndef CONFIG_ALPINE_VP_WA +#define AL_CRYPTO_TX_CDESC_SIZE 8 +#define AL_CRYPTO_RX_CDESC_SIZE 8 +#else +/* Currently in VP it is always 16 bytes */ +#define AL_CRYPTO_TX_CDESC_SIZE 16 +#define AL_CRYPTO_RX_CDESC_SIZE 16 +#endif + +#define AL_CRYPTO_DMA_MAX_CHANNELS 4 + +/* 4 interrupts for the 4 queues and 1 for group D */ +#define AL_CRYPTO_MSIX_INTERRUPTS (AL_CRYPTO_DMA_MAX_CHANNELS + 1) + +#define AL_CRYPTO_SW_RING_MIN_ORDER 4 +#define AL_CRYPTO_SW_RING_MAX_ORDER 16 + +/* + * tx: 31(supported by HW) - 1(metadata) - 1(sa_in) - + * 1(enc_iv_in|auth_iv_in) - 1(auth_sign_in) = 27 + * rx: 31(supported by HW) - 1(sa_out) - 1(enc_iv_out|auth_iv_out) - + * 1(next_enc_iv_out) - 1(auth_sign_out) = 27 + */ +#define AL_CRYPTO_OP_MAX_BUFS 27 +#define AL_CRYPTO_HASH_HMAC_IPAD 0x36 +#define AL_CRYPTO_HASH_HMAC_OPAD 0x5c + +#define AL_CRYPTO_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, + DES3_EDE_BLOCK_SIZE */ + +#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS +#define AL_CRYPTO_STATS_INC(var, incval) (var) += (incval) +#define AL_CRYPTO_STATS_DEC(var, decval) (var) -= (decval) +#define AL_CRYPTO_STATS_SET(var, val) (var) = (val) +#define AL_CRYPTO_STATS_LOCK(lock) \ + spin_lock_bh(lock) +#define AL_CRYPTO_STATS_UNLOCK(lock) \ + spin_unlock_bh(lock) +#define AL_CRYPTO_STATS_INIT_LOCK(lock) \ + spin_lock_init(lock) +#else +#define AL_CRYPTO_STATS_INC(var, incval) +#define AL_CRYPTO_STATS_DEC(var, decval) +#define AL_CRYPTO_STATS_SET(var, val) +#define AL_CRYPTO_STATS_LOCK(lock) +#define AL_CRYPTO_STATS_UNLOCK(lock) +#define AL_CRYPTO_STATS_INIT_LOCK(lock) +#endif + +#define AL_CRYPTO_IRQNAME_SIZE 40 + +#define AL_CRYPTO_INT_MODER_RES 1 + +#define CHKSUM_BLOCK_SIZE 1 +#define CHKSUM_DIGEST_SIZE 4 + +#define MAX_CACHE_ENTRIES_PER_CHANNEL CACHED_SAD_SIZE + +enum al_crypto_req_type { + AL_CRYPTO_REQ_SKCIPHER, + AL_CRYPTO_REQ_AEAD, + AL_CRYPTO_REQ_AHASH, + AL_CRYPTO_REQ_CRC, +}; + +/* software descriptor structure + */ +struct al_crypto_sw_desc { + union { + struct al_crypto_transaction hal_xaction; + struct al_crc_transaction hal_crc_xaction; + }; + + struct al_buf src_bufs[AL_SSM_MAX_SRC_DESCS]; + struct al_buf dst_bufs[AL_SSM_MAX_DST_DESCS]; + + void *req; + int req_type; + int src_nents; + int dst_nents; +}; + +/** + * cache entry in lru list + */ +struct al_crypto_cache_lru_entry { + struct list_head list; + struct al_crypto_cache_state *ctx; + u32 cache_idx; +}; + +#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS +/** + * struct al_crypto_chan_stats_gen - Crypto DMA channel statistics - general + * @skcipher_tfms - active skcipher tfms + * @aead_tfms - active aead tfms + * @ahash_tfms - active ahash tfms + * @crc_tfms - active crc/checksum tfms + */ +struct al_crypto_chan_stats_gen { + uint64_t skcipher_tfms; + uint64_t aead_tfms; + uint64_t ahash_tfms; + uint64_t crc_tfms; +}; + +/** + * struct al_crypto_chan_stats_prep - Crypto DMA channel statistics - + * preparation + * @skcipher_encrypt_reqs - skcipher encrypt requests + * @skcipher_encrypt_bytes - skcipher encrypted bytes + * @skcipher_decrypt_reqs - skcipher decrypt requests + * @skcipher_decrypt_bytes - skcipher decrypted bytes + * @aead_encrypt_hash_reqs - aead combined encrypt+hash requests + * @aead_encrypt_bytes - aead encrypted bytes + * @aead_hash_bytes - aead hashed bytes + * @aead_decrypt_validate_reqs - aead combined decrypt+validate requests + * @aead_decrypt_bytes - aead decrypted bytes + * @aead_validate_bytes - aead validated bytes + * @ahash_reqs - ahash requests + * @ahash_bytes - ahash hashed bytes + * @crc_reqs - crc/checksum requests + * @crc_bytes - crc/checksum bytes + * @cache_misses - SA cache misses + * @skcipher_reqs_le512 - skcipher requests up to 512 bytes + * @skcipher_reqs_512_2048 - skcipher requests between 512 and 2048 bytes + * @skcipher_reqs_2048_4096 - skcipher requests between 2048 and 4096 bytes + * @skcipher_reqs_gt4096 - skcipher requests greater than 4096 bytes + * @aead_reqs_le512 - aead requests up to 512 bytes + * @aead_reqs_512_2048 - aead requests between 512 and 2048 bytes + * @aead_reqs_2048_4096 - aead requests between 2048 and 4096 bytes + * @aead_reqs_gt4096 - aead requests greater than 4096 bytes + * @ahash_reqs_le512 - ahash requests up to 512 bytes + * @ahash_reqs_512_2048 - ahash requests between 512 and 2048 bytes + * @ahash_reqs_2048_4096 - ahash requests between 2048 and 4096 bytes + * @ahash_reqs_gt4096 - ahash requests greater than 4096 bytes + * @crc_reqs_le512 - crc/checksum requests up to 512 bytes + * @crc_reqs_512_2048 - crc/checksum requests between 512 and 2048 bytes + * @crc_reqs_2048_4096 - crc/checksum requests between 2048 and 4096 bytes + * @crc_reqs_gt4096 - crc/checksum requests greater than 4096 bytes + */ +struct al_crypto_chan_stats_prep { + uint64_t skcipher_encrypt_reqs; + uint64_t skcipher_encrypt_bytes; + uint64_t skcipher_decrypt_reqs; + uint64_t skcipher_decrypt_bytes; + uint64_t aead_encrypt_hash_reqs; + uint64_t aead_encrypt_bytes; + uint64_t aead_hash_bytes; + uint64_t aead_decrypt_validate_reqs; + uint64_t aead_decrypt_bytes; + uint64_t aead_validate_bytes; + uint64_t ahash_reqs; + uint64_t ahash_bytes; + uint64_t crc_reqs; + uint64_t crc_bytes; + uint64_t cache_misses; + uint64_t skcipher_reqs_le512; + uint64_t skcipher_reqs_512_2048; + uint64_t skcipher_reqs_2048_4096; + uint64_t skcipher_reqs_gt4096; + uint64_t aead_reqs_le512; + uint64_t aead_reqs_512_2048; + uint64_t aead_reqs_2048_4096; + uint64_t aead_reqs_gt4096; + uint64_t ahash_reqs_le512; + uint64_t ahash_reqs_512_2048; + uint64_t ahash_reqs_2048_4096; + uint64_t ahash_reqs_gt4096; + uint64_t crc_reqs_le512; + uint64_t crc_reqs_512_2048; + uint64_t crc_reqs_2048_4096; + uint64_t crc_reqs_gt4096; +}; + +/** + * struct al_crypto_chan_stats_comp - Crypto DMA channel statistics - + * completion + * @redundant_int_cnt - redundant interrupts (interrupts without completions) + * @max_active_descs - maximum number of descriptors that were active + * simultaneously + */ +struct al_crypto_chan_stats_comp { + uint64_t redundant_int_cnt; + uint64_t max_active_descs; +}; +#endif + +/* internal representation of a DMA channel + */ +struct al_crypto_chan { + struct al_ssm_dma *hal_crypto; + + int idx; + enum al_ssm_q_type type; + cpumask_t affinity_hint_mask; + + /* Tx UDMA hw ring */ + int tx_descs_num; /* number of descriptors in Tx queue */ + void *tx_dma_desc_virt; /* Tx descriptors ring */ + dma_addr_t tx_dma_desc; + + /* Rx UDMA hw ring */ + int rx_descs_num; /* number of descriptors in Rx queue */ + void *rx_dma_desc_virt; /* Rx descriptors ring */ + dma_addr_t rx_dma_desc; + void *rx_dma_cdesc_virt; /* Rx completion descriptors ring */ + dma_addr_t rx_dma_cdesc; + + /* SW descriptors ring */ + u16 alloc_order; + struct al_crypto_sw_desc **sw_ring; +#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS + struct al_crypto_chan_stats_gen stats_gen; + spinlock_t stats_gen_lock; /* locked during access of general stats */ +#endif + + /* Frequently accessed prep */ + spinlock_t prep_lock ____cacheline_aligned; /* locked during + xaction preparation and + cache management changes */ + u16 head; + int 
sw_desc_num_locked; /* num of sw descriptors locked during xaction + preparation */ + u32 tx_desc_produced; /* num of hw descriptors generated by HAL */ + struct crypto_queue sw_queue; /* sw queue for backlog */ +#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS + struct al_crypto_chan_stats_prep stats_prep; +#endif + + /* LRU cache management */ + int cache_entries_num; + struct list_head cache_lru_list; + int cache_lru_count; + struct al_crypto_cache_lru_entry cache_lru_entries[ + MAX_CACHE_ENTRIES_PER_CHANNEL]; + + /* Frequently accessed cleanup */ + spinlock_t cleanup_lock ____cacheline_aligned_in_smp; /* locked during + cleanup */ + u16 tail; +#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS + struct al_crypto_chan_stats_comp stats_comp; +#endif + + struct al_crypto_device *device; + struct tasklet_struct cleanup_task; + struct kobject kobj; +}; + +#define to_dev(al_crypto_chan) (&(al_crypto_chan)->device->pdev->dev) + +/* internal structure for AL Crypto IRQ + */ +struct al_crypto_irq { + char name[AL_CRYPTO_IRQNAME_SIZE]; +}; + +/* internal structure for AL Crypto device + */ +struct al_crypto_device { + struct pci_dev *pdev; + + struct al_ssm_dma_params ssm_dma_params; + void __iomem *udma_regs_base; + void __iomem *crypto_regs_base; + + struct al_ssm_dma hal_crypto; + + struct msix_entry msix_entries[AL_CRYPTO_MSIX_INTERRUPTS]; + struct al_crypto_irq irq_tbl[AL_CRYPTO_MSIX_INTERRUPTS]; + struct al_crypto_chan *channels[AL_CRYPTO_DMA_MAX_CHANNELS]; + int num_channels; + int max_channels; + int crc_channels; + struct kset *channels_kset; + struct tasklet_struct cleanup_task; + int int_moderation; + int num_irq_used; + + struct kmem_cache *cache; /* descriptors cache */ + atomic_t tfm_count; /* used to allocate the dma + channel for current tfm */ + atomic_t crc_tfm_count; /* used to allocate the dma + channel for current crc tfm */ + struct list_head skcipher_list; /* list of registered skcipher algorithms */ + struct list_head aead_list; /* list of registered aead algorithms */ + struct list_head hash_list; /* list of registered hash algorithms */ + struct list_head crc_list; /* list of registered crc/csum algorithms */ +}; + +struct al_crypto_cache_state { + bool cached; + int idx; +}; + +/* context structure + */ +struct al_crypto_ctx { + struct al_crypto_chan *chan; + struct al_crypto_cache_state cache_state; + struct al_crypto_sa sa; + struct al_crypto_hw_sa *hw_sa; + dma_addr_t hw_sa_dma_addr; + struct crypto_shash *sw_hash; /* for HMAC key hashing */ + u8 *iv; + dma_addr_t iv_dma_addr; + u8 *hmac_pads; + struct crypto_aes_ctx aes_key; +}; + +/* DMA ring management inline functions */ +static inline u16 al_crypto_ring_size(struct al_crypto_chan *chan) +{ + return 1 << chan->alloc_order; +} + +/* count of transactions in flight with the engine */ +static inline u16 al_crypto_ring_active(struct al_crypto_chan *chan) +{ + return CIRC_CNT(chan->head, chan->tail, al_crypto_ring_size(chan)); +} +static inline u16 al_crypto_ring_space(struct al_crypto_chan *chan) +{ + return CIRC_SPACE(chan->head, chan->tail, al_crypto_ring_size(chan)); +} + +static inline struct al_crypto_sw_desc * +al_crypto_get_ring_ent(struct al_crypto_chan *chan, u16 idx) +{ + return chan->sw_ring[idx & (al_crypto_ring_size(chan) - 1)]; +} + +int al_crypto_get_sw_desc(struct al_crypto_chan *chan, int num); + +void al_crypto_tx_submit(struct al_crypto_chan *chan); + +#ifdef DEBUG +#define set_desc_id(desc, i) ((desc)->id = (i)) +#define desc_id(desc) ((desc)->id) +#else +#define set_desc_id(desc, i) +#define desc_id(desc) (0) 
+#endif + +static inline void sg_map_to_xaction_buffers(struct scatterlist *sg_in, + struct al_buf* bufs, + unsigned int length, + int *buf_idx) +{ + struct scatterlist *sg = sg_in, *next_sg; + + if (!length) + return; + + bufs[*buf_idx].addr = sg_dma_address(sg); + bufs[*buf_idx].len = 0; + + while (length > sg_dma_len(sg)) { + bufs[*buf_idx].len += sg_dma_len(sg); + + length -= sg_dma_len(sg); + next_sg = sg_next(sg); + + BUG_ON(!next_sg); + + if (sg_dma_address(sg) + sg_dma_len(sg) != sg_dma_address(next_sg)) { + (*buf_idx)++; + bufs[*buf_idx].addr = sg_dma_address(next_sg); + bufs[*buf_idx].len = 0; + } + + sg = next_sg; + } + + /* last sg */ + bufs[*buf_idx].len += length; + (*buf_idx)++; +} + +/* SA cache management using LRU */ +void al_crypto_cache_update_lru(struct al_crypto_chan *chan, + struct al_crypto_cache_state *ctx); + +u32 al_crypto_cache_replace_lru(struct al_crypto_chan *chan, + struct al_crypto_cache_state *ctx, + struct al_crypto_cache_state **old_ctx); + +void al_crypto_cache_remove_lru(struct al_crypto_chan *chan, + struct al_crypto_cache_state *ctx); + +/* Core APIs */ +int al_crypto_core_init( + struct al_crypto_device *device, + void __iomem *iobase_udma, + void __iomem *iobase_app); + +int al_crypto_core_terminate( + struct al_crypto_device *device); + +int al_crypto_cleanup_fn( + struct al_crypto_chan *chan, + int from_tasklet); + +void al_crypto_set_int_moderation( + struct al_crypto_device *device, + int usec); + +int al_crypto_get_int_moderation( + struct al_crypto_device *device); + +int al_crypto_keylen_to_sa_aes_ksize(unsigned int keylen, + enum al_crypto_sa_aes_ksize *ksize); + +int al_crypto_sa_aes_ksize_to_keylen(enum al_crypto_sa_aes_ksize ksize, + unsigned int *keylen); + +void al_crypto_hexdump_sgl(const char *level, struct scatterlist *sgl, + const char *name, off_t skip, int len, gfp_t gfp_flags); + +/* aead related functions */ +void al_crypto_cleanup_single_aead( + struct al_crypto_chan *chan, + struct al_crypto_sw_desc *desc, + u32 comp_status); + +/* ahash related functions */ +void al_crypto_cleanup_single_ahash( + struct al_crypto_chan *chan, + struct al_crypto_sw_desc *desc, + u32 comp_status); + +/* crc related functions */ +void al_crypto_cleanup_single_crc( + struct al_crypto_chan *chan, + struct al_crypto_sw_desc *desc, + u32 comp_status); + +int hmac_setkey(struct al_crypto_ctx *ctx, + const u8 *key, + unsigned int keylen, + unsigned int sw_hash_interm_offset, + unsigned int sw_hash_interm_size); + +/* sysfs */ +void al_crypto_free_channel(struct al_crypto_chan *chan); + +int al_crypto_sysfs_init(struct al_crypto_device *device); + +void al_crypto_sysfs_terminate(struct al_crypto_device *device); + +/* al_crypto_skcipher APIs */ +int al_crypto_skcipher_init(struct al_crypto_device *device); + +void al_crypto_skcipher_terminate(struct al_crypto_device *device); + +void al_crypto_skcipher_cleanup_single( + struct al_crypto_chan *chan, + struct al_crypto_sw_desc *desc, + u32 comp_status); + +int al_crypto_skcipher_process_queue(struct al_crypto_chan *chan); + +/* al_crypto_aead APIs */ +int al_crypto_aead_init(struct al_crypto_device *device); + +void al_crypto_aead_terminate(struct al_crypto_device *device); + +void al_crypto_aead_cleanup_single( + struct al_crypto_chan *chan, + struct al_crypto_sw_desc *desc, + u32 comp_status); + +/* al_crypto_hash APIs */ +int al_crypto_hash_init(struct al_crypto_device *device); + +void al_crypto_hash_terminate(struct al_crypto_device *device); + +/* al_crypto_crc APIs */ +int 
al_crypto_crc_init(struct al_crypto_device *device); + +void al_crypto_crc_terminate(struct al_crypto_device *device); + +#endif /* __AL_CRYPTO_H__ */ diff --git a/target/linux/alpine/files/drivers/crypto/al/al_crypto_aead.c b/target/linux/alpine/files/drivers/crypto/al/al_crypto_aead.c new file mode 100644 index 00000000000000..30a97f0cee4e47 --- /dev/null +++ b/target/linux/alpine/files/drivers/crypto/al/al_crypto_aead.c @@ -0,0 +1,1043 @@ +/* + * drivers/crypto/al_crypto_aead.c + * + * Annapurna Labs Crypto driver - aead algorithms + * + * Copyright (C) 2012 Annapurna Labs Ltd. + * + * Algorithm registration code and chained scatter/gather lists + * handling based on caam driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/export.h> +#include <linux/crypto.h> +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "al_crypto.h" + +#define AL_CRYPTO_CRA_PRIORITY 300 + +static int al_crypto_init_tfm(struct crypto_aead *tfm); + +static void al_crypto_exit_tfm(struct crypto_aead *tfm); + +static int al_crypto_setkey(struct crypto_aead *tfm, + const u8 *key, unsigned int keylen); + +static int al_crypto_setauthsize(struct crypto_aead *tfm, + unsigned int authsize); + +static int al_crypto_encrypt(struct aead_request *req); + +static int al_crypto_decrypt(struct aead_request *req); + +struct al_crypto_aead_req_ctx { + u8 iv[AL_CRYPTO_MAX_IV_LENGTH] ____cacheline_aligned; + enum al_crypto_dir dir; + size_t cryptlen; + struct scatterlist *in_sg; + struct scatterlist *out_sg; + struct scatterlist sgl; + size_t pages_sg; + size_t total_in; + size_t total_out; + size_t total_out_save; + int sgs_copied; +}; + +struct al_crypto_alg { + struct list_head entry; + struct al_crypto_device *device; + enum al_crypto_sa_enc_type enc_type; + enum al_crypto_sa_op sa_op; + enum al_crypto_sa_auth_type auth_type; + enum al_crypto_sa_sha2_mode sha2_mode; + char sw_hash_name[CRYPTO_MAX_ALG_NAME]; + unsigned int sw_hash_interm_offset; + unsigned int sw_hash_interm_size; + struct aead_alg alg; +}; + +struct al_crypto_aead_template { + char name[CRYPTO_MAX_ALG_NAME]; + char driver_name[CRYPTO_MAX_ALG_NAME]; + unsigned int blocksize; + enum al_crypto_sa_enc_type enc_type; + enum al_crypto_sa_op sa_op; + enum al_crypto_sa_auth_type auth_type; + enum al_crypto_sa_sha2_mode sha2_mode; + char sw_hash_name[CRYPTO_MAX_ALG_NAME]; + unsigned int sw_hash_interm_offset; + unsigned int sw_hash_interm_size; + struct aead_alg alg; +}; + +static struct al_crypto_aead_template driver_algs[] = { + { + .name = "authenc(hmac(sha1),cbc(aes))", + .driver_name = "authenc-hmac-sha1-cbc-aes-al", + .blocksize = AES_BLOCK_SIZE, + .enc_type = AL_CRYPT_AES_CBC, + .sa_op = AL_CRYPT_ENC_AUTH, + .auth_type = 
AL_CRYPT_AUTH_SHA1, + .sha2_mode = 0, + .sw_hash_name = "sha1", + .sw_hash_interm_offset = offsetof(struct sha1_state, state), + .sw_hash_interm_size = sizeof(((struct sha1_state *)0)->state), + .alg = { + .init = al_crypto_init_tfm, + .exit = al_crypto_exit_tfm, + .setkey = al_crypto_setkey, + .setauthsize = al_crypto_setauthsize, + .encrypt = al_crypto_encrypt, + .decrypt = al_crypto_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + }, + { + .name = "authenc(hmac(sha256),cbc(aes))", + .driver_name = "authenc-hmac-sha256-cbc-aes-al", + .blocksize = AES_BLOCK_SIZE, + .enc_type = AL_CRYPT_AES_CBC, + .sa_op = AL_CRYPT_ENC_AUTH, + .auth_type = AL_CRYPT_AUTH_SHA2, + .sha2_mode = AL_CRYPT_SHA2_256, + .sw_hash_name = "sha256", + .sw_hash_interm_offset = offsetof(struct sha256_state, state), + .sw_hash_interm_size = sizeof(((struct sha256_state *)0)->state), + .alg = { + .init = al_crypto_init_tfm, + .exit = al_crypto_exit_tfm, + .setkey = al_crypto_setkey, + .setauthsize = al_crypto_setauthsize, + .encrypt = al_crypto_encrypt, + .decrypt = al_crypto_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + }, + { + .name = "authenc(hmac(sha384),cbc(aes))", + .driver_name = "authenc-hmac-sha384-cbc-aes-al", + .blocksize = AES_BLOCK_SIZE, + .enc_type = AL_CRYPT_AES_CBC, + .sa_op = AL_CRYPT_ENC_AUTH, + .auth_type = AL_CRYPT_AUTH_SHA2, + .sha2_mode = AL_CRYPT_SHA2_384, + .sw_hash_name = "sha384", + .sw_hash_interm_offset = offsetof(struct sha512_state, state), + .sw_hash_interm_size = sizeof(((struct sha512_state *)0)->state), + .alg = { + .init = al_crypto_init_tfm, + .exit = al_crypto_exit_tfm, + .setkey = al_crypto_setkey, + .setauthsize = al_crypto_setauthsize, + .encrypt = al_crypto_encrypt, + .decrypt = al_crypto_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + }, + { + .name = "authenc(hmac(sha512),cbc(aes))", + .driver_name = "authenc-hmac-sha512-cbc-aes-al", + .blocksize = AES_BLOCK_SIZE, + .enc_type = AL_CRYPT_AES_CBC, + .sa_op = AL_CRYPT_ENC_AUTH, + .auth_type = AL_CRYPT_AUTH_SHA2, + .sha2_mode = AL_CRYPT_SHA2_512, + .sw_hash_name = "sha512", + .sw_hash_interm_offset = offsetof(struct sha512_state, state), + .sw_hash_interm_size = sizeof(((struct sha512_state *)0)->state), + .alg = { + .init = al_crypto_init_tfm, + .exit = al_crypto_exit_tfm, + .setkey = al_crypto_setkey, + .setauthsize = al_crypto_setauthsize, + .encrypt = al_crypto_encrypt, + .decrypt = al_crypto_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + }, +}; + +/****************************************************************************** + *****************************************************************************/ +static int al_crypto_init_tfm(struct crypto_aead *tfm) +{ + struct al_crypto_ctx *ctx = crypto_aead_ctx(tfm); + struct aead_alg *alg = crypto_aead_alg(tfm); + struct al_crypto_alg *al_crypto_alg = container_of(alg, struct al_crypto_alg, alg); + struct al_crypto_device *device = al_crypto_alg->device; + int chan_idx = atomic_inc_return(&device->tfm_count) % + (device->num_channels - device->crc_channels); + int rc; + + dev_dbg(&device->pdev->dev, "%s: cra_name=%s alignmask=%x\n", + __func__, tfm->base.__crt_alg->cra_name, crypto_aead_alignmask(tfm)); + + memset(ctx, 0, sizeof(struct al_crypto_ctx)); + + ctx->chan = device->channels[chan_idx]; + + ctx->sa.enc_type = al_crypto_alg->enc_type; + ctx->sa.sa_op = al_crypto_alg->sa_op; + ctx->sa.auth_type = al_crypto_alg->auth_type; + ctx->sa.sha2_mode = 
al_crypto_alg->sha2_mode; + + /* Allocate SW hash for hmac long key hashing and key XOR ipad/opad + * intermediate calculations + */ + if (strlen(al_crypto_alg->sw_hash_name)) { + ctx->sw_hash = crypto_alloc_shash(al_crypto_alg->sw_hash_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->sw_hash)) { + dev_err(to_dev(ctx->chan), "failed to allocate sw hash for aead\n"); + return PTR_ERR(ctx->sw_hash); + } + + ctx->hmac_pads = kmalloc(2 * crypto_shash_descsize(ctx->sw_hash), GFP_KERNEL); + if (!ctx->hmac_pads) { + rc = -ENOMEM; + goto err_free_shash; + } + } + + crypto_aead_set_reqsize(tfm, sizeof(struct al_crypto_aead_req_ctx)); + + ctx->hw_sa = dma_alloc_coherent(&device->pdev->dev, + sizeof(struct al_crypto_hw_sa), + &ctx->hw_sa_dma_addr, + GFP_KERNEL); + if (!ctx->hw_sa) { + rc = -ENOMEM; + goto err_free_pads; + } + + ctx->iv = dma_alloc_coherent(&device->pdev->dev, + AL_CRYPTO_MAX_IV_LENGTH, + &ctx->iv_dma_addr, + GFP_KERNEL); + if (!ctx->iv) { + rc = -ENOMEM; + goto err_free_hw_sa; + } + /* random first IV */ + get_random_bytes(ctx->iv, AL_CRYPTO_MAX_IV_LENGTH); + + AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock); + AL_CRYPTO_STATS_INC(ctx->chan->stats_gen.aead_tfms, 1); + AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock); + + return 0; + +err_free_hw_sa: + dma_free_coherent(&device->pdev->dev, sizeof(struct al_crypto_hw_sa), + ctx->hw_sa, ctx->hw_sa_dma_addr); +err_free_pads: + kfree(ctx->hmac_pads); +err_free_shash: + if (ctx->sw_hash) + crypto_free_shash(ctx->sw_hash); + return rc; +} + +/****************************************************************************** + *****************************************************************************/ +static void al_crypto_exit_tfm(struct crypto_aead *tfm) +{ + struct al_crypto_ctx *ctx = crypto_aead_ctx(tfm); + struct aead_alg *alg = crypto_aead_alg(tfm); + struct al_crypto_alg *al_crypto_alg = container_of(alg, struct al_crypto_alg, alg); + struct al_crypto_device *device = al_crypto_alg->device; + + dev_dbg(&device->pdev->dev, "%s: cra_name=%s\n", + __func__, tfm->base.__crt_alg->cra_name); + + /* LRU list access has to be protected */ + spin_lock_bh(&ctx->chan->prep_lock); + if (ctx->cache_state.cached) + al_crypto_cache_remove_lru(ctx->chan, &ctx->cache_state); + spin_unlock_bh(&ctx->chan->prep_lock); + + if (ctx->sw_hash) + crypto_free_shash(ctx->sw_hash); + + kfree(ctx->hmac_pads); + + if (ctx->hw_sa_dma_addr) + dma_free_coherent(&device->pdev->dev, + sizeof(struct al_crypto_hw_sa), + ctx->hw_sa, + ctx->hw_sa_dma_addr); + + if (ctx->iv_dma_addr) + dma_free_coherent(&device->pdev->dev, + AL_CRYPTO_MAX_IV_LENGTH, + ctx->iv, + ctx->iv_dma_addr); + + AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock); + AL_CRYPTO_STATS_DEC(ctx->chan->stats_gen.aead_tfms, 1); + AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock); +} + +/****************************************************************************** + *****************************************************************************/ +static int al_crypto_setkey(struct crypto_aead *tfm, + const u8 *key, unsigned int keylen) +{ + struct al_crypto_ctx *ctx = crypto_aead_ctx(tfm); + struct aead_alg *alg = crypto_aead_alg(tfm); + struct al_crypto_alg *al_crypto_alg = container_of(alg, struct al_crypto_alg, alg); + struct crypto_authenc_keys authenc_keys; + int rc; + + dev_dbg(to_dev(ctx->chan), "%s: keylen=%d\n", __func__, keylen); + + /* Currently only AES is supported */ + BUG_ON((ctx->sa.enc_type != AL_CRYPT_AES_CBC) && + (ctx->sa.enc_type != AL_CRYPT_AES_ECB) && + (ctx->sa.enc_type != AL_CRYPT_AES_CTR)); + + rc = crypto_authenc_extractkeys(&authenc_keys, key, keylen); + if (rc) + return rc; + + dev_dbg(to_dev(ctx->chan), "%s: authkeylen=%d enckeylen=%d\n", + __func__, authenc_keys.authkeylen, authenc_keys.enckeylen); + print_hex_dump_debug(KBUILD_MODNAME ": authkey: ", + DUMP_PREFIX_OFFSET, 16, 1, + authenc_keys.authkey, authenc_keys.authkeylen, false); 
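+	/*
+	 * Note: the key blob passed into this function uses the generic
+	 * authenc() layout from crypto/authenc.h: an rtattr header with
+	 * rta_type = CRYPTO_AUTHENC_KEYA_PARAM, a crypto_authenc_key_param
+	 * carrying enckeylen (__be32), then the raw HMAC key followed by
+	 * the raw cipher key. crypto_authenc_extractkeys() above has already
+	 * validated and split that blob into authkey/enckey.
+	 */
+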
print_hex_dump_debug(KBUILD_MODNAME ": enckey: ", + DUMP_PREFIX_OFFSET, 16, 1, + authenc_keys.enckey, authenc_keys.enckeylen, false); + + if (al_crypto_keylen_to_sa_aes_ksize(authenc_keys.enckeylen, &ctx->sa.aes_ksize)) + return -EINVAL; + + if (ctx->sw_hash) { + rc = hmac_setkey(ctx, authenc_keys.authkey, authenc_keys.authkeylen, + al_crypto_alg->sw_hash_interm_offset, + al_crypto_alg->sw_hash_interm_size); + if (rc) + return rc; + + print_hex_dump_debug(KBUILD_MODNAME ": hmac_iv_in: ", DUMP_PREFIX_OFFSET, + 16, 1, ctx->sa.hmac_iv_in, + al_crypto_alg->sw_hash_interm_size, false); + print_hex_dump_debug(KBUILD_MODNAME ": hmac_iv_out: ", DUMP_PREFIX_OFFSET, + 16, 1, ctx->sa.hmac_iv_out, + al_crypto_alg->sw_hash_interm_size, false); + } + + /* TODO: optimize HAL to hold ptrs to save this memcpy */ + /* copy the key to the sa */ + memcpy(&ctx->sa.enc_key, authenc_keys.enckey, authenc_keys.enckeylen); + + ctx->sa.sign_after_enc = true; + ctx->sa.auth_after_dec = false; + + /* Sets the counter increment to 128 bit to be aligned with the + * linux implementation. We know it contradicts the NIST spec. + * If and when the linux will be aligned with the spec we should fix it + * too. + * This variable is relevant only for CTR, GCM and CCM modes*/ + ctx->sa.cntr_size = AL_CRYPT_CNTR_128_BIT; + + al_crypto_hw_sa_init(&ctx->sa, ctx->hw_sa); + + /* mark the sa as not cached, will update in next xaction */ + spin_lock_bh(&ctx->chan->prep_lock); + if (ctx->cache_state.cached) + al_crypto_cache_remove_lru(ctx->chan, + &ctx->cache_state); + spin_unlock_bh(&ctx->chan->prep_lock); + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +static int al_crypto_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + struct al_crypto_ctx *ctx = crypto_aead_ctx(tfm); + struct aead_alg *alg = crypto_aead_alg(tfm); + int signature_size = (authsize >> 2) - 1; + + dev_dbg(to_dev(ctx->chan), "%s: authsize=%d maxauthsize=%d signature_size=%d\n", + __func__, authsize, alg->maxauthsize, signature_size); + + if (signature_size < 0 || authsize > alg->maxauthsize || (authsize & 3)) + return -EINVAL; + + ctx->sa.signature_size = signature_size; + ctx->sa.auth_signature_msb = true; + + al_crypto_hw_sa_init(&ctx->sa, ctx->hw_sa); + + /* mark the sa as not cached, will update in next xaction */ + spin_lock_bh(&ctx->chan->prep_lock); + if (ctx->cache_state.cached) + al_crypto_cache_remove_lru(ctx->chan, &ctx->cache_state); + spin_unlock_bh(&ctx->chan->prep_lock); + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +/* DMA unmap buffers for aead request + */ +static inline void al_crypto_dma_unmap(struct al_crypto_chan *chan, + struct aead_request *req, + int src_nents, int dst_nents, + struct al_crypto_sw_desc *desc) +{ + struct al_crypto_aead_req_ctx *rctx = aead_request_ctx(req); + + if (likely(rctx->in_sg == rctx->out_sg)) { + dma_unmap_sg(to_dev(chan), rctx->in_sg, src_nents, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sg(to_dev(chan), rctx->in_sg, src_nents, DMA_TO_DEVICE); + dma_unmap_sg(to_dev(chan), rctx->out_sg, dst_nents, DMA_FROM_DEVICE); + } + + if (desc && desc->hal_xaction.enc_iv_in.len) + dma_unmap_single(to_dev(chan), + desc->hal_xaction.enc_iv_in.addr, + desc->hal_xaction.enc_iv_in.len, + DMA_TO_DEVICE); +} + 
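+/*
+ * Illustrative usage sketch - not part of the original driver. It shows how a
+ * kernel client could exercise one of the authenc() AEADs registered by this
+ * file (see driver_algs above) through the generic crypto API. Assumptions:
+ * <crypto/aead.h>, <crypto/authenc.h> and <linux/rtnetlink.h> (for the RTA_*
+ * helpers used to build the key blob) are available here; the function name,
+ * all-zero keys and buffer sizes are placeholders for the example only.
+ */
+static int __maybe_unused al_crypto_aead_usage_sketch(void)
+{
+	static u8 buf[AES_BLOCK_SIZE + SHA256_DIGEST_SIZE]; /* payload + tag */
+	static u8 keybuf[RTA_SPACE(sizeof(struct crypto_authenc_key_param)) +
+			 SHA256_DIGEST_SIZE + AES_KEYSIZE_128];
+	struct rtattr *rta = (struct rtattr *)keybuf;
+	struct crypto_authenc_key_param *param;
+	struct crypto_aead *tfm;
+	struct aead_request *req;
+	struct scatterlist sg;
+	u8 iv[AES_BLOCK_SIZE] = { 0 };
+	DECLARE_CRYPTO_WAIT(wait);
+	int rc;
+
+	/* May resolve to "authenc-hmac-sha256-cbc-aes-al" once registered */
+	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	/* Build the RTA-encoded authenc() key blob: param, HMAC key, AES key */
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	param->enckeylen = cpu_to_be32(AES_KEYSIZE_128);
+	/* all-zero example keys already follow the param in keybuf */
+
+	rc = crypto_aead_setkey(tfm, keybuf, sizeof(keybuf));
+	if (!rc)
+		rc = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
+
+	req = aead_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		crypto_free_aead(tfm);
+		return -ENOMEM;
+	}
+
+	/* Encrypt one block in place; dst leaves room for the auth tag */
+	sg_init_one(&sg, buf, sizeof(buf));
+	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				  crypto_req_done, &wait);
+	aead_request_set_ad(req, 0);
+	aead_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);
+	if (!rc)
+		rc = crypto_wait_req(crypto_aead_encrypt(req), &wait);
+
+	aead_request_free(req);
+	crypto_free_aead(tfm);
+	return rc;
+}
+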
+/****************************************************************************** + *****************************************************************************/ +static inline void al_crypto_update_stats(struct al_crypto_transaction *xaction, + struct al_crypto_chan *chan) +{ + if (xaction->dir == AL_CRYPT_ENCRYPT) { + AL_CRYPTO_STATS_INC(chan->stats_prep.aead_encrypt_hash_reqs, 1); + AL_CRYPTO_STATS_INC(chan->stats_prep.aead_encrypt_bytes, + xaction->enc_in_len); + AL_CRYPTO_STATS_INC(chan->stats_prep.aead_hash_bytes, + xaction->auth_in_len); + } else { + AL_CRYPTO_STATS_INC(chan->stats_prep.aead_decrypt_validate_reqs, 1); + AL_CRYPTO_STATS_INC(chan->stats_prep.aead_decrypt_bytes, + xaction->enc_in_len); + AL_CRYPTO_STATS_INC(chan->stats_prep.aead_validate_bytes, + xaction->auth_in_len); + } + + if (xaction->auth_in_len <= 512) + AL_CRYPTO_STATS_INC(chan->stats_prep.aead_reqs_le512, 1); + else if ((xaction->auth_in_len > 512) && (xaction->auth_in_len <= 2048)) + AL_CRYPTO_STATS_INC(chan->stats_prep.aead_reqs_512_2048, 1); + else if ((xaction->auth_in_len > 2048) && (xaction->auth_in_len <= 4096)) + AL_CRYPTO_STATS_INC(chan->stats_prep.aead_reqs_2048_4096, 1); + else + AL_CRYPTO_STATS_INC(chan->stats_prep.aead_reqs_gt4096, 1); +} + +/****************************************************************************** + *****************************************************************************/ +static void al_crypto_dump_xaction_buffers(struct al_crypto_sw_desc *desc) +{ + struct aead_request *req = (struct aead_request *)desc->req; + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_aead_ctx(tfm); + struct al_crypto_chan *chan = ctx->chan; + struct al_crypto_aead_req_ctx *rctx = aead_request_ctx(req); + struct al_crypto_transaction *xaction = &desc->hal_xaction; + struct scatterlist *sg; + int i; + + i = 0; + sg = rctx->in_sg; + while (sg) { + dev_dbg(to_dev(chan), "%s: src sg%d offset=%x dma_address=%x dma_len=%d\n", + __func__, i, sg->offset, sg_dma_address(sg), sg_dma_len(sg)); + sg = sg_next(sg); + i++; + } + + i = 0; + sg = rctx->out_sg; + while (sg) { + dev_dbg(to_dev(chan), "%s: dst sg%d offset=%x dma_address=%x dma_len=%d\n", + __func__, i, sg->offset, sg_dma_address(sg), sg_dma_len(sg)); + sg = sg_next(sg); + i++; + } + + dev_dbg(to_dev(chan), "%s: src.num=%d dst.num=%d\n", + __func__, xaction->src.num, xaction->dst.num); + + for (i = 0; i < xaction->src.num; i++) { + dev_dbg(to_dev(chan), "%s: src buf%d addr=%x len=%d\n", + __func__, i, desc->src_bufs[i].addr, desc->src_bufs[i].len); + } + + for (i = 0; i < xaction->dst.num; i++) { + dev_dbg(to_dev(chan), "%s: dst buf%d addr=%x len=%d\n", + __func__, i, desc->dst_bufs[i].addr, desc->dst_bufs[i].len); + } +} + +/****************************************************************************** + *****************************************************************************/ +static int al_crypto_prepare_xaction_buffers(struct aead_request *req, + struct al_crypto_sw_desc *desc, + u8 *iv) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_aead_ctx(tfm); + struct al_crypto_aead_req_ctx *rctx = aead_request_ctx(req); + struct al_crypto_chan *chan = ctx->chan; + struct al_crypto_transaction *xaction = &desc->hal_xaction; + int authsize = crypto_aead_authsize(tfm); + int ivsize = crypto_aead_ivsize(tfm); + int src_idx = 0, dst_idx = 0; + struct scatterlist *sg; + int i; + + print_hex_dump_debug(KBUILD_MODNAME ": iv: ", DUMP_PREFIX_OFFSET, + 16, 1, iv, ivsize, 
false); + + /* IV might be allocated on stack, copy for DMA */ + memcpy(rctx->iv, iv, ivsize); + /* map IV */ + xaction->enc_iv_in.addr = dma_map_single(to_dev(chan), + rctx->iv, ivsize, + DMA_TO_DEVICE); + if (dma_mapping_error(to_dev(chan), xaction->enc_iv_in.addr)) { + dev_err(to_dev(chan), "%s: dma_map_single failed!\n", __func__); + return -ENOMEM; + } + xaction->enc_iv_in.len = ivsize; + + xaction->src_size = rctx->total_in; + + /* add assoc+enc+auth data */ + sg_map_to_xaction_buffers(rctx->in_sg, desc->src_bufs, + xaction->src_size, + &src_idx); + if (rctx->in_sg == rctx->out_sg) { + for (i = 0; i < src_idx; i++) + desc->dst_bufs[i] = desc->src_bufs[i]; + dst_idx = src_idx; + } else { + sg_map_to_xaction_buffers(rctx->out_sg, desc->dst_bufs, + rctx->total_out, &dst_idx); + } + + xaction->auth_in_len += req->assoclen; + xaction->enc_in_off += req->assoclen; + + xaction->auth_in_len += rctx->cryptlen; + xaction->enc_in_len = rctx->cryptlen; + + xaction->src.bufs = &desc->src_bufs[0]; + xaction->src.num = src_idx; + xaction->dst.bufs = &desc->dst_bufs[0]; + xaction->dst.num = dst_idx; + + dev_dbg(to_dev(chan), "%s: src_size=%d src_idx=%d dst_idx=%d auth_in_off=%d auth_in_len=%d enc_in_off=%d enc_in_len=%d\n", + __func__, + xaction->src_size, src_idx, dst_idx, + xaction->auth_in_off, xaction->auth_in_len, + xaction->enc_in_off, xaction->enc_in_len); + + /* set signature buffer for auth */ + if (rctx->dir == AL_CRYPT_ENCRYPT) { + sg = rctx->out_sg; + while (!sg_is_last(sg)) + sg = sg_next(sg); + + dev_dbg(to_dev(chan), "%s: last dst sg dma_address=%x dma_len=%d\n", + __func__, sg_dma_address(sg), sg_dma_len(sg)); + + /* assume that auth result is not scattered */ + BUG_ON(sg_dma_len(sg) < authsize); + xaction->auth_sign_out.addr = sg_dma_address(sg) + sg_dma_len(sg) - authsize; + xaction->auth_sign_out.len = authsize; + + dev_dbg(to_dev(chan), "%s: auth_sign_out.addr=%x auth_sign_out.len=%d\n", + __func__, xaction->auth_sign_out.addr, xaction->auth_sign_out.len); + } else { + sg = rctx->in_sg; + while (!sg_is_last(sg)) + sg = sg_next(sg); + + dev_dbg(to_dev(chan), "%s: last src sg dma_address=%x dma_len=%d\n", + __func__, sg_dma_address(sg), sg_dma_len(sg)); + + /* assume that auth result is not scattered */ + BUG_ON(sg_dma_len(sg) < authsize); + xaction->auth_sign_in.addr = sg_dma_address(sg) + sg_dma_len(sg) - authsize; + xaction->auth_sign_in.len = authsize; + + dev_dbg(to_dev(chan), "%s: auth_sign_in.addr=%x auth_sign_in.len=%d\n", + __func__, xaction->auth_sign_in.addr, xaction->auth_sign_in.len); + } + + al_crypto_dump_xaction_buffers(desc); + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +/* Prepare encryption+auth transaction to be processed by HAL + */ +static int al_crypto_prepare_xaction(struct aead_request *req, + struct al_crypto_sw_desc *desc, + u8 *iv) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_aead_ctx(tfm); + struct al_crypto_chan *chan = ctx->chan; + struct al_crypto_aead_req_ctx *rctx = aead_request_ctx(req); + struct al_crypto_transaction *xaction; + int rc = 0; + + dev_dbg(to_dev(chan), "%s: dir=%d\n", __func__, rctx->dir); + + xaction = &desc->hal_xaction; + memset(xaction, 0, sizeof(struct al_crypto_transaction)); + xaction->dir = rctx->dir; + + rc = al_crypto_prepare_xaction_buffers(req, desc, iv); + if (unlikely(rc != 0)) { + dev_err(to_dev(chan), "%s: 
al_crypto_prepare_xaction_buffers failed!\n", + __func__); + return rc; + } + + if (!ctx->cache_state.cached) { + xaction->sa_indx = al_crypto_cache_replace_lru(chan, + &ctx->cache_state, NULL); + xaction->sa_in.addr = ctx->hw_sa_dma_addr; + xaction->sa_in.len = sizeof(struct al_crypto_hw_sa); + } else { + al_crypto_cache_update_lru(chan, &ctx->cache_state); + xaction->sa_indx = ctx->cache_state.idx; + } + + xaction->flags = AL_SSM_INTERRUPT; + + al_crypto_update_stats(xaction, chan); + + return rc; +} + +static void al_crypto_sg_copy_buf(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes, int out) +{ + struct scatter_walk walk; + + if (!nbytes) + return; + + scatterwalk_start(&walk, sg); + scatterwalk_advance(&walk, start); + scatterwalk_copychunks(buf, &walk, nbytes, out); + scatterwalk_done(&walk, out, 0); +} + +static bool al_crypto_is_aligned(struct scatterlist *sg, size_t align) +{ + while (sg) { + if (!IS_ALIGNED(sg->offset, align)) + return false; + if (!IS_ALIGNED(sg->length, align)) + return false; + sg = sg_next(sg); + } + return true; +} + +static bool al_crypto_need_copy_sg(struct aead_request *req) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct al_crypto_aead_req_ctx *rctx = aead_request_ctx(req); + struct al_crypto_ctx *ctx = crypto_aead_ctx(tfm); + struct al_crypto_chan *chan = ctx->chan; + int authsize = crypto_aead_authsize(tfm); + struct scatterlist *sg; + + /* auth tag cannot be scattered */ + if (rctx->dir == AL_CRYPT_ENCRYPT) { + sg = rctx->out_sg; + while (!sg_is_last(sg)) + sg = sg_next(sg); + dev_dbg(to_dev(chan), "%s: last dst sg_dma_len=%d\n", + __func__, sg_dma_len(sg)); + if (sg_dma_len(sg) < authsize) + return true; + } else { + sg = rctx->in_sg; + while (!sg_is_last(sg)) + sg = sg_next(sg); + dev_dbg(to_dev(chan), "%s: last src sg_dma_len=%d\n", + __func__, sg_dma_len(sg)); + if (sg_dma_len(sg) < authsize) + return true; + } + + /* hw can't handle separate src and dst sgs ? */ + if (rctx->in_sg != rctx->out_sg) + return true; + + if (!al_crypto_is_aligned(rctx->in_sg, AES_BLOCK_SIZE)) + return true; + if (!al_crypto_is_aligned(rctx->out_sg, AES_BLOCK_SIZE)) + return true; + + return false; +} + +static int al_crypto_copy_sg(struct aead_request *req) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_aead_ctx(tfm); + struct al_crypto_chan *chan = ctx->chan; + struct al_crypto_aead_req_ctx *rctx = aead_request_ctx(req); + gfp_t gfp_flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; + bool need_copy = al_crypto_need_copy_sg(req); + int len; + void *buf; + + rctx->sgs_copied = 0; + + len = max(rctx->total_in, rctx->total_out); + + dev_dbg(to_dev(chan), "%s: copy sg %sneeded buflen=%d\n", + __func__, need_copy ? 
"" : "not ", len); + + if (!need_copy) + return 0; + + rctx->pages_sg = get_order(len); + buf = (void *)__get_free_pages(gfp_flags, rctx->pages_sg); + if (!buf) { + dev_err(to_dev(chan), "Can't allocate pages when unaligned\n"); + return -EFAULT; + } + + al_crypto_sg_copy_buf(buf, req->src, 0, rctx->total_in, 0); + + sg_init_one(&rctx->sgl, buf, len); + rctx->in_sg = rctx->out_sg = &rctx->sgl; + + rctx->total_in = rctx->total_out = len; + + rctx->sgs_copied = 1; + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +/* Prepare aead encryption and auth dma, call hal transaction preparation + * function and submit the request to HAL. + * Grabs and releases producer lock for relevant sw ring + */ +static int al_crypto_do_crypt(struct aead_request *req, u8 *iv) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_aead_ctx(tfm); + struct al_crypto_chan *chan = ctx->chan; + struct al_crypto_aead_req_ctx *rctx = aead_request_ctx(req); + int authsize = crypto_aead_authsize(tfm); + struct al_crypto_sw_desc *desc; + int src_nents = 0, dst_nents = 0; + int idx, rc; + + rctx->in_sg = req->src; + rctx->out_sg = req->dst; + + if (rctx->dir == AL_CRYPT_ENCRYPT) { + rctx->total_in = req->assoclen + rctx->cryptlen; + rctx->total_out = req->assoclen + rctx->cryptlen + authsize; + } else { + rctx->total_in = req->assoclen + rctx->cryptlen + authsize; + rctx->total_out = req->assoclen + rctx->cryptlen; + } + + rctx->total_out_save = rctx->total_out; + + if (rctx->out_sg == rctx->in_sg) + rctx->total_in = rctx->total_out = max(rctx->total_in, rctx->total_out); + + rc = al_crypto_copy_sg(req); + if (rc) + return rc; + + if (rctx->out_sg != rctx->in_sg) { + src_nents = sg_nents_for_len(rctx->in_sg, rctx->total_in); + dst_nents = sg_nents_for_len(rctx->out_sg, rctx->total_out); + } else { + src_nents = sg_nents_for_len(rctx->in_sg, rctx->total_in); + dst_nents = src_nents; + } + + dev_dbg(to_dev(chan), "%s: src_nents=%d dst_nents=%d authsize=%d (rctx->in_sg %s rctx->out_sg)\n", + __func__, src_nents, dst_nents, authsize, rctx->in_sg == rctx->out_sg ? 
"==" : "!="); + + /* Currently supported max sg chain length is + * AL_CRYPTO_OP_MAX_DATA_BUFS(12) which is minimum of descriptors left + * for data in a transaction: + * tx: 31(supported by HW) - 1(metadata) - 1(sa_in) - + * 1(enc_iv_in|auth_iv_in) - 1(auth_sign_in) = 27 + * rx: 31(supported by HW) - 1(sa_out) - 1(enc_iv_out|auth_iv_out) - + * 1(next_enc_iv_out) - 1(auth_sign_out) = 27 + */ + BUG_ON((src_nents + 1 > AL_CRYPTO_OP_MAX_BUFS) || + (dst_nents + 1 > AL_CRYPTO_OP_MAX_BUFS)); + + if (likely(rctx->in_sg == rctx->out_sg)) { + dma_map_sg(to_dev(chan), rctx->in_sg, src_nents, DMA_BIDIRECTIONAL); + } else { + dma_map_sg(to_dev(chan), rctx->in_sg, src_nents, DMA_TO_DEVICE); + dma_map_sg(to_dev(chan), rctx->out_sg, dst_nents, DMA_FROM_DEVICE); + } + + spin_lock_bh(&chan->prep_lock); + if (likely(al_crypto_get_sw_desc(chan, 1) == 0)) + idx = chan->head; + else { + dev_dbg(to_dev(chan), + "%s: al_crypto_get_sw_desc failed!\n", __func__); + al_crypto_dma_unmap(chan, req, src_nents, dst_nents, NULL); + spin_unlock_bh(&chan->prep_lock); + return -EBUSY; + } + + chan->sw_desc_num_locked = 1; + chan->tx_desc_produced = 0; + + desc = al_crypto_get_ring_ent(chan, idx); + desc->req = (void *)req; + desc->req_type = AL_CRYPTO_REQ_AEAD; + desc->src_nents = src_nents; + desc->dst_nents = dst_nents; + + rc = al_crypto_prepare_xaction(req, desc, iv); + if (unlikely(rc != 0)) { + dev_err(to_dev(chan), + "%s: al_crypto_prepare_xaction failed!\n", __func__); + al_crypto_dma_unmap(chan, req, src_nents, dst_nents, desc); + spin_unlock_bh(&chan->prep_lock); + return rc; + } + + /* send crypto transaction to engine */ + rc = al_crypto_dma_prepare(chan->hal_crypto, chan->idx, &desc->hal_xaction); + if (unlikely(rc != 0)) { + dev_err(to_dev(chan), + "%s: al_crypto_dma_prepare failed!\n", __func__); + al_crypto_dma_unmap(chan, req, src_nents, dst_nents, desc); + spin_unlock_bh(&chan->prep_lock); + return rc; + } + + chan->tx_desc_produced += desc->hal_xaction.tx_descs_count; + + dev_dbg(to_dev(chan), "%s: tx_desc_produced=%d\n", + __func__, chan->tx_desc_produced); + + al_crypto_tx_submit(chan); + + spin_unlock_bh(&chan->prep_lock); + + return -EINPROGRESS; +} + +/****************************************************************************** + *****************************************************************************/ +static int al_crypto_encrypt(struct aead_request *req) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_aead_ctx(tfm); + struct al_crypto_chan *chan = ctx->chan; + struct al_crypto_aead_req_ctx *rctx = aead_request_ctx(req); + + dev_dbg(to_dev(chan), "%s: cryptlen=%d assoclen=%d\n", + __func__, req->cryptlen, req->assoclen); + print_hex_dump_debug(KBUILD_MODNAME ": iv: ", DUMP_PREFIX_OFFSET, + 16, 1, req->iv, crypto_aead_ivsize(tfm), false); + + rctx->dir = AL_CRYPT_ENCRYPT; + rctx->cryptlen = req->cryptlen; + + return al_crypto_do_crypt(req, req->iv); +} + +/****************************************************************************** + *****************************************************************************/ +static int al_crypto_decrypt(struct aead_request *req) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_aead_ctx(tfm); + struct al_crypto_chan *chan = ctx->chan; + struct al_crypto_aead_req_ctx *rctx = aead_request_ctx(req); + int authsize = crypto_aead_authsize(tfm); + + dev_dbg(to_dev(chan), "%s: cryptlen=%d assoclen=%d authsize=%d\n", + __func__, req->cryptlen, req->assoclen, authsize); + 
print_hex_dump_debug(KBUILD_MODNAME ": iv: ", DUMP_PREFIX_OFFSET, + 16, 1, req->iv, crypto_aead_ivsize(tfm), false); + + rctx->dir = AL_CRYPT_DECRYPT; + /* req->cryptlen includes the authsize when decrypting */ + rctx->cryptlen = req->cryptlen - authsize; + + return al_crypto_do_crypt(req, req->iv); +} + +/****************************************************************************** + *****************************************************************************/ +static struct al_crypto_alg *al_crypto_alg_alloc(struct al_crypto_device *device, + struct al_crypto_aead_template *template) +{ + struct al_crypto_alg *t_alg; + struct aead_alg *alg; + + t_alg = kzalloc(sizeof(struct al_crypto_alg), GFP_KERNEL); + if (!t_alg) { + dev_err(&device->pdev->dev, "failed to allocate t_alg\n"); + return ERR_PTR(-ENOMEM); + } + + alg = &t_alg->alg; + *alg = template->alg; + + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + template->driver_name); + + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = AL_CRYPTO_CRA_PRIORITY; + alg->base.cra_blocksize = template->blocksize; + alg->base.cra_alignmask = AES_BLOCK_SIZE - 1; + alg->base.cra_ctxsize = sizeof(struct al_crypto_ctx); + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + + alg->chunksize = alg->base.cra_blocksize; + + t_alg->enc_type = template->enc_type; + t_alg->sa_op = template->sa_op; + t_alg->auth_type = template->auth_type; + t_alg->sha2_mode = template->sha2_mode; + t_alg->device = device; + + snprintf(t_alg->sw_hash_name, CRYPTO_MAX_ALG_NAME, "%s", + template->sw_hash_name); + t_alg->sw_hash_interm_offset = template->sw_hash_interm_offset; + t_alg->sw_hash_interm_size = template->sw_hash_interm_size; + + return t_alg; +} + +/****************************************************************************** + *****************************************************************************/ +/* Cleanup single aead request - invoked from cleanup tasklet (interrupt + * handler) + */ +void al_crypto_aead_cleanup_single(struct al_crypto_chan *chan, + struct al_crypto_sw_desc *desc, + uint32_t comp_status) +{ + struct aead_request *req = (struct aead_request *)desc->req; + struct al_crypto_aead_req_ctx *rctx = aead_request_ctx(req); + int err = 0; + + dev_dbg(to_dev(chan), "%s: chan->idx=%d comp_status=%x\n", + __func__, chan->idx, comp_status); + + al_crypto_dma_unmap(chan, req, desc->src_nents, desc->dst_nents, desc); + + if (comp_status & AL_CRYPT_AUTH_ERROR) + err = -EBADMSG; + + dev_dbg(to_dev(chan), "%s: assoclen=%d cryptlen=%d\n", + __func__, req->assoclen, rctx->cryptlen); + + if (rctx->sgs_copied) { + void *buf = sg_virt(&rctx->sgl); + al_crypto_sg_copy_buf(buf, req->dst, 0, rctx->total_out_save, 1); + free_pages((unsigned long)buf, rctx->pages_sg); + } + + req->base.complete(&req->base, err); +} + +/****************************************************************************** + *****************************************************************************/ +int al_crypto_aead_init(struct al_crypto_device *device) +{ + int err = 0; + int i; + + INIT_LIST_HEAD(&device->aead_list); + + atomic_set(&device->tfm_count, -1); + + /* register crypto algorithms the device supports */ + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + struct al_crypto_alg *t_alg; + + t_alg = al_crypto_alg_alloc(device, &driver_algs[i]); + if (IS_ERR(t_alg)) { + err = PTR_ERR(t_alg); + dev_warn(&device->pdev->dev, + "%s alg allocation failed with 
%d\n", + driver_algs[i].driver_name, err); + continue; + } + + err = crypto_register_aeads(&t_alg->alg, 1); + if (err) { + dev_warn(&device->pdev->dev, + "%s alg registration failed with %d\n", + t_alg->alg.base.cra_driver_name, err); + kfree(t_alg); + } else + list_add_tail(&t_alg->entry, &device->aead_list); + } + + if (!list_empty(&device->aead_list)) + dev_info(&device->pdev->dev, + "aead algorithms registered in /proc/crypto\n"); + + return err; +} + +/****************************************************************************** + *****************************************************************************/ +void al_crypto_aead_terminate(struct al_crypto_device *device) +{ + struct al_crypto_alg *t_alg, *n; + + if (!device->aead_list.next) + return; + + list_for_each_entry_safe(t_alg, n, &device->aead_list, entry) { + crypto_unregister_aeads(&t_alg->alg, 1); + list_del(&t_alg->entry); + kfree(t_alg); + } +} diff --git a/target/linux/alpine/files/drivers/crypto/al/al_crypto_core.c b/target/linux/alpine/files/drivers/crypto/al/al_crypto_core.c new file mode 100644 index 00000000000000..b49f6dd6b11848 --- /dev/null +++ b/target/linux/alpine/files/drivers/crypto/al/al_crypto_core.c @@ -0,0 +1,1435 @@ +/* + * drivers/crypto/al_crypto_core.c + * + * Annapurna Labs Crypto driver - core + * + * Copyright (C) 2012 Annapurna Labs Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* +#ifndef DEBUG +#define DEBUG +#endif +*/ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "al_crypto.h" +#include "al_crypto_module_params.h" + +#define smp_read_barrier_depends() do {} while(0) + +static void al_crypto_free_chan_resources( + struct al_crypto_chan *chan); + +static int al_crypto_alloc_chan_resources( + struct al_crypto_chan *chan); + +static void al_crypto_free_channels( + struct al_crypto_device *device); + +static int al_crypto_alloc_channels( + struct al_crypto_device *device); + +static int al_crypto_setup_interrupts( + struct al_crypto_device *device); + +static irqreturn_t al_crypto_do_interrupt_msix( + int irq, + void *data); + +static irqreturn_t al_crypto_do_interrupt_group_d( + int irq, + void *data); + +static irqreturn_t al_crypto_do_interrupt_msix_rx( + int irq, + void *data); + +static irqreturn_t al_crypto_do_interrupt_legacy( + int irq, + void *data); + +static int al_crypto_init_channels( + struct al_crypto_device *device, + int max_channels); + +static void al_crypto_init_channel( + struct al_crypto_device *device, + struct al_crypto_chan *chan, + int idx); + +static struct al_crypto_sw_desc **al_crypto_alloc_sw_ring( + struct al_crypto_chan *chan, + int order, + gfp_t flags); + +static void al_crypto_free_sw_ring( + struct al_crypto_sw_desc **ring, + struct al_crypto_chan *chan, + int size); + +static struct al_crypto_sw_desc *al_crypto_alloc_ring_ent( + struct al_crypto_chan *chan, + gfp_t flags); + +static void al_crypto_free_ring_ent( + struct al_crypto_sw_desc *desc, + struct al_crypto_chan *chan); + +static int al_crypto_iofic_config(struct al_crypto_device *device, + bool single_msix); + +static void al_crypto_cleanup_tasklet( + unsigned long data); + +static void al_crypto_cleanup_tasklet_msix_rx( + unsigned long data); + +static void al_crypto_cleanup_tasklet_legacy( + unsigned long data); + +static void al_crypto_unmask_interrupts(struct al_crypto_device *device, + bool single_interrupt); + +static void al_crypto_group_d_errors_handler(struct al_crypto_device *device); +/****************************************************************************** + *****************************************************************************/ +int al_crypto_core_init(struct al_crypto_device *device, + void __iomem *iobase_udma, + void __iomem *iobase_app) +{ + int32_t rc; + int err; + int max_channels; + int crc_channels; + + dev_dbg(&device->pdev->dev, "%s\n", __func__); + + device->cache = kmem_cache_create("al_crypto", + sizeof(struct al_crypto_sw_desc), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!device->cache) + return -ENOMEM; + + max_channels = device->max_channels; + crc_channels = device->crc_channels; + + if ((crc_channels > max_channels) || (max_channels > DMA_MAX_Q)) { + dev_err(&device->pdev->dev, "invalid number of channels\n"); + err = -EINVAL; + goto done; + } + + device->udma_regs_base = iobase_udma; + /* The crypto regs exists only for the PF. + * The VF uses the same configs/ error reporting as the PF */ + device->crypto_regs_base = iobase_app ? 
iobase_app + + AL_CRYPTO_APP_REGS_BASE_OFFSET : NULL; + + device->ssm_dma_params.udma_regs_base = device->udma_regs_base; + + device->ssm_dma_params.name = + kmalloc(strlen(dev_name(&device->pdev->dev)) + 1, GFP_KERNEL); + if (device->ssm_dma_params.name == NULL) { + dev_err(&device->pdev->dev, "kmalloc failed\n"); + err = -ENOMEM; + goto done; + } + + strcpy(device->ssm_dma_params.name, dev_name(&device->pdev->dev)); + + device->ssm_dma_params.num_of_queues = max_channels; + + err = al_ssm_dma_init(&device->hal_crypto, + &device->ssm_dma_params); + if (err) { + dev_err(&device->pdev->dev, "al_crypto_dma_init failed\n"); + goto err_no_chan; + } + + /* enumerate and initialize channels (queues) */ + al_crypto_init_channels(device, max_channels); + + err = al_crypto_alloc_channels(device); + if (err) { + dev_err(&device->pdev->dev, + "failed to alloc channel resources\n"); + goto err_no_irq; + } + + /* enable Crypto DMA engine */ + rc = al_ssm_dma_state_set(&device->hal_crypto, UDMA_NORMAL); + + err = al_crypto_setup_interrupts(device); + + if (err) { + dev_err(&device->pdev->dev, "failed to setup interrupts\n"); + goto err_no_irq; + } + + goto done; + +err_no_irq: + al_crypto_free_channels(device); +err_no_chan: + kfree(device->ssm_dma_params.name); +done: + return err; +} + +/****************************************************************************** + *****************************************************************************/ +int al_crypto_core_terminate(struct al_crypto_device *device) +{ + int status = 0; + + dev_dbg(&device->pdev->dev, "%s\n", __func__); + + al_crypto_free_channels(device); + + kfree(device->ssm_dma_params.name); + + kmem_cache_destroy(device->cache); + + return status; +} + +/****************************************************************************** + *****************************************************************************/ +static int al_crypto_init_channels(struct al_crypto_device *device, + int max_channels) +{ + int i; + struct al_crypto_chan *chan; + + for (i = 0; i < max_channels; i++) { + chan = kzalloc(sizeof(struct al_crypto_chan), GFP_KERNEL); + if (!chan) + break; + + al_crypto_init_channel(device, chan, i); + } + + device->num_channels = i; + + return i; +} + +/****************************************************************************** + *****************************************************************************/ +static void al_crypto_init_channel(struct al_crypto_device *device, + struct al_crypto_chan *chan, int idx) +{ + unsigned long data = (unsigned long)chan; + + dev_dbg(&device->pdev->dev, "%s: idx=%d\n", + __func__, idx); + + chan->device = device; + chan->idx = idx; + chan->hal_crypto = &device->hal_crypto; + + AL_CRYPTO_STATS_INIT_LOCK(&chan->stats_gen_lock); + spin_lock_init(&chan->prep_lock); + spin_lock_init(&chan->cleanup_lock); + + device->channels[idx] = chan; + + INIT_LIST_HEAD(&chan->cache_lru_list); + chan->cache_lru_count = 0; + + tasklet_init(&chan->cleanup_task, al_crypto_cleanup_tasklet, data); + + crypto_init_queue(&chan->sw_queue, 1); +} + +/****************************************************************************** + *****************************************************************************/ +static void al_crypto_unmask_interrupts(struct al_crypto_device *device, + bool single_interrupt) +{ + /* enable group D summary */ + u32 group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM; + u32 group_b_mask = (1 << device->num_channels) - 1; /* bit per Rx q*/ + u32 group_d_mask = AL_INT_GROUP_D_ALL; + + struct unit_regs 
__iomem *regs_base = (struct unit_regs __iomem *)device->udma_regs_base; + + if (single_interrupt) + group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM; + + al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_A, group_a_mask); + al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B, group_b_mask); + al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D, group_d_mask); +} + +/****************************************************************************** + *****************************************************************************/ +static void al_crypto_config_crypto_app_interrupts + (struct al_crypto_device *device) +{ + if (!device->crypto_regs_base) + return; + + al_iofic_clear_cause( + device->crypto_regs_base + AL_CRYPTO_APP_IOFIC_OFFSET, + AL_INT_GROUP_A, + AL_CRYPTO_APP_INT_A_ALL); + + al_iofic_unmask( + device->crypto_regs_base + AL_CRYPTO_APP_IOFIC_OFFSET, + AL_INT_GROUP_A, + AL_CRYPTO_APP_INT_A_ALL); + + al_iofic_config( + device->crypto_regs_base + AL_CRYPTO_APP_IOFIC_OFFSET, + AL_INT_GROUP_A, + INT_CONTROL_GRP_CLEAR_ON_READ | + INT_CONTROL_GRP_MASK_MSI_X); + + /* Clear the interrupt reg */ + al_iofic_read_cause( + device->crypto_regs_base + AL_CRYPTO_APP_IOFIC_OFFSET, + AL_INT_GROUP_A); +} + +/****************************************************************************** + *****************************************************************************/ +static int al_crypto_iofic_config(struct al_crypto_device *device, + bool single_msix) +{ + enum al_iofic_mode iofic_mode; + int int_moderation_group; + + if (single_msix) { + iofic_mode = AL_IOFIC_MODE_MSIX_PER_GROUP; + int_moderation_group = AL_INT_GROUP_A; + } else { + iofic_mode = AL_IOFIC_MODE_MSIX_PER_Q; + int_moderation_group = AL_INT_GROUP_B; + } + + if (al_udma_iofic_config( + (struct unit_regs *)device->udma_regs_base, + iofic_mode, 0x480, 0x480, 0x1E0, 0x1E0)) { + + dev_err(&device->pdev->dev, "al_udma_iofic_config failed!.\n"); + return -EIO; + } + + al_iofic_moder_res_config( + &((struct unit_regs *)(device->udma_regs_base))->gen. + interrupt_regs.main_iofic, + int_moderation_group, 15); + + al_crypto_config_crypto_app_interrupts(device); + al_crypto_unmask_interrupts(device, single_msix); + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +static int al_crypto_setup_interrupts(struct al_crypto_device *device) +{ + struct al_crypto_chan *chan; + struct pci_dev *pdev = device->pdev; + struct device *dev = &pdev->dev; + struct msix_entry *msix; + int i, msixcnt; + unsigned int cpu; + int err = -EINVAL; + int devm_alloc_num = 0; + + if (al_crypto_get_use_single_msix()) + goto msix_single_vector; + + /* The number of MSI-X vectors should equal the number of channels + 1 + * for group D */ + msixcnt = device->num_channels + 1; + + for (i = 0; i < device->num_channels; i++) + device->msix_entries[i].entry = + AL_INT_MSIX_RX_COMPLETION_START + i; + + device->msix_entries[device->num_channels].entry = + AL_INT_MSIX_GROUP_A_SUM_D_IDX; + + err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt); + + if (err < 0) { + dev_err(dev, "pci_enable_msix failed! using intx instead.\n"); + goto intx; + } + + if (err > 0) { + dev_err(dev, "pci_enable_msix failed! 
falling back to a single MSI-X vector.\n");
+		goto msix_single_vector;
+	}
+
+	for (i = 0; i < device->num_channels; i++) {
+		msix = &device->msix_entries[i];
+
+		chan = device->channels[i];
+
+		dev_dbg(dev, "%s: requesting irq %d\n", __func__, msix->vector);
+
+		snprintf(device->irq_tbl[i].name, AL_CRYPTO_IRQNAME_SIZE,
+			"al-crypto-comp-%d@pci:%s", i,
+			pci_name(pdev));
+
+		err = devm_request_irq(
+			dev,
+			msix->vector,
+			al_crypto_do_interrupt_msix,
+			0,
+			device->irq_tbl[i].name,
+			chan);
+
+		if (err) {
+			dev_err(dev, "devm_request_irq failed!\n");
+			goto err_free_devm;
+		}
+
+		devm_alloc_num++;
+
+		cpu = cpumask_next((i % num_online_cpus() - 1), cpu_online_mask);
+		cpumask_set_cpu(cpu, &chan->affinity_hint_mask);
+
+		irq_set_affinity_hint(msix->vector, &chan->affinity_hint_mask);
+	}
+
+	snprintf(device->irq_tbl[device->num_channels].name,
+		AL_CRYPTO_IRQNAME_SIZE,
+		"al-crypto-interrupt-group-d@pci:%s",
+		pci_name(pdev));
+
+	err = devm_request_irq(
+		dev,
+		device->msix_entries[device->num_channels].vector,
+		al_crypto_do_interrupt_group_d,
+		0,
+		device->irq_tbl[device->num_channels].name,
+		device);
+
+	if (err) {
+		dev_err(dev, "devm_request_irq failed!\n");
+		goto err_free_devm;
+	}
+
+	err = al_crypto_iofic_config(device, false);
+	if (err)
+		return err;
+
+	goto done;
+
+msix_single_vector:
+	msix = &device->msix_entries[0];
+
+	msix->entry = 0;
+
+	tasklet_init(&device->cleanup_task,
+		al_crypto_cleanup_tasklet_msix_rx,
+		(unsigned long)device);
+
+	err = pci_enable_msix_exact(pdev, device->msix_entries, 1);
+
+	if (err) {
+		pci_disable_msix(pdev);
+		goto intx;
+	}
+
+	snprintf(device->irq_tbl[0].name, AL_CRYPTO_IRQNAME_SIZE,
+		"al-crypto-msix-all@pci:%s", pci_name(pdev));
+
+	err = devm_request_irq(
+		dev,
+		msix->vector,
+		al_crypto_do_interrupt_msix_rx,
+		0,
+		device->irq_tbl[0].name,
+		device);
+
+	if (err) {
+		dev_err(dev, "devm_request_irq failed!\n");
+		pci_disable_msix(pdev);
+		goto intx;
+	}
+
+	devm_alloc_num = 1;
+
+	err = al_crypto_iofic_config(device, true);
+	if (err)
+		return err;
+	goto done;
+
+intx:
+	tasklet_init(&device->cleanup_task,
+		al_crypto_cleanup_tasklet_legacy,
+		(unsigned long)device);
+
+	snprintf(device->irq_tbl[0].name, AL_CRYPTO_IRQNAME_SIZE,
+		"al-crypto-intx-all@pci:%s", pci_name(pdev));
+
+	err = devm_request_irq(dev, pdev->irq, al_crypto_do_interrupt_legacy,
+			IRQF_SHARED, device->irq_tbl[0].name, device);
+	if (err)
+		goto err_no_irq;
+
+	if (al_udma_iofic_config(
+			(struct unit_regs *)device->udma_regs_base,
+			AL_IOFIC_MODE_LEGACY, 0x480, 0x480, 0x1E0, 0x1E0)) {
+		dev_err(dev, "al_udma_iofic_config failed!\n");
+		return -EIO;
+	}
+
+	al_crypto_config_crypto_app_interrupts(device);
+	al_crypto_unmask_interrupts(device, true);
+
+done:
+	device->num_irq_used = devm_alloc_num;
+	return 0;
+
+err_free_devm:
+	for (i = 0; i < devm_alloc_num; i++) {
+		msix = &device->msix_entries[i];
+		chan = device->channels[i];
+		irq_set_affinity_hint(msix->vector, NULL);
+		devm_free_irq(dev, msix->vector, chan);
+	}
+
+	return -EIO;
+
+err_no_irq:
+	/* Disable all interrupt generation */
+
+	dev_err(dev, "no usable interrupts\n");
+	return err;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Free tx and rx descriptor rings for all channels
+ */
+static void al_crypto_free_channels(struct al_crypto_device *device)
+{
+	int i;
+
+	for (i = 0; i < device->num_channels; i++) {
+		al_crypto_free_chan_resources(device->channels[i]);
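+		/* note: the descriptor rings and coherent DMA buffers are
+		 * released before the channel itself is torn down
+		 */
+		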
al_crypto_free_channel(device->channels[i]); + } + + for (i = 0; i < device->num_irq_used; i++) + irq_set_affinity_hint(device->msix_entries[i].vector, NULL); +} + +/****************************************************************************** + *****************************************************************************/ +/* Allocate/initialize tx and rx descriptor rings for all channels + */ +static int al_crypto_alloc_channels(struct al_crypto_device *device) +{ + int i, j; + int err = -EINVAL; + + for (i = 0; i < device->num_channels; i++) { + err = al_crypto_alloc_chan_resources(device->channels[i]); + + if (err < 0) { + dev_err( + &device->pdev->dev, + "failed to alloc resources for channel %d\n", + i); + + for (j = 0; j < i; j++) { + al_crypto_free_chan_resources( + device->channels[j]); + } + return err; + } + } + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +static inline bool al_crypto_is_crypt_auth_chan(struct al_crypto_chan *chan) +{ + struct al_crypto_device *device = chan->device; + return (chan->idx < device->num_channels - device->crc_channels); +} + +/****************************************************************************** + *****************************************************************************/ +/* Allocate/initialize tx and rx descriptor rings for one channel + */ +static int al_crypto_alloc_chan_resources(struct al_crypto_chan *chan) +{ + struct al_crypto_device *device = chan->device; + struct device *dev = to_dev(chan); + struct al_crypto_sw_desc **sw_ring; + struct al_udma_q_params tx_params; + struct al_udma_q_params rx_params; + struct al_udma_m2s_pkt_len_conf conf; + struct al_udma *crypto_udma; + + int rc = 0; + int tx_descs_order; + int rx_descs_order; + int ring_alloc_order; + + dev_dbg(dev, "%s: idx=%d\n", __func__, chan->idx); + + /* have we already been set up? 
*/
+	if (chan->sw_ring)
+		return 1 << chan->alloc_order;
+
+	tx_descs_order = al_crypto_get_tx_descs_order();
+	rx_descs_order = al_crypto_get_rx_descs_order();
+	ring_alloc_order = al_crypto_get_ring_alloc_order();
+
+	chan->tx_descs_num = 1 << tx_descs_order;
+	chan->rx_descs_num = 1 << rx_descs_order;
+
+	/* allocate coherent memory for Tx submission descriptors */
+	chan->tx_dma_desc_virt = dma_alloc_coherent(dev,
+					chan->tx_descs_num *
+					sizeof(union al_udma_desc),
+					&chan->tx_dma_desc,
+					GFP_KERNEL);
+	if (chan->tx_dma_desc_virt == NULL) {
+		dev_err(dev, "failed to allocate %zu bytes of coherent memory for Tx submission descriptors\n",
+			chan->tx_descs_num * sizeof(union al_udma_desc));
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "allocated tx descriptor ring: virt 0x%p phys 0x%llx\n",
+		chan->tx_dma_desc_virt, (u64)chan->tx_dma_desc);
+
+	/* allocate coherent memory for Rx submission descriptors */
+	chan->rx_dma_desc_virt = dma_alloc_coherent(dev,
+					chan->rx_descs_num *
+					sizeof(union al_udma_desc),
+					&chan->rx_dma_desc,
+					GFP_KERNEL);
+	if (chan->rx_dma_desc_virt == NULL) {
+		dev_err(dev, "failed to allocate %zu bytes of coherent memory for Rx submission descriptors\n",
+			chan->rx_descs_num * sizeof(union al_udma_desc));
+
+		al_crypto_free_chan_resources(chan);
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "allocated rx descriptor ring: virt 0x%p phys 0x%llx\n",
+		chan->rx_dma_desc_virt, (u64)chan->rx_dma_desc);
+
+	/* allocate coherent memory for Rx completion descriptors */
+	chan->rx_dma_cdesc_virt = dma_alloc_coherent(dev,
+					chan->rx_descs_num *
+					AL_CRYPTO_RX_CDESC_SIZE,
+					&chan->rx_dma_cdesc,
+					GFP_KERNEL);
+	if (chan->rx_dma_cdesc_virt == NULL) {
+		dev_err(dev, "failed to allocate %d bytes of coherent memory for Rx completion descriptors\n",
+			chan->rx_descs_num * AL_CRYPTO_RX_CDESC_SIZE);
+
+		al_crypto_free_chan_resources(chan);
+		return -ENOMEM;
+	}
+
+	/* clear the Rx completion descriptors to avoid false positive */
+	memset(
+		chan->rx_dma_cdesc_virt,
+		0,
+		chan->rx_descs_num * AL_CRYPTO_RX_CDESC_SIZE);
+
+	dev_dbg(
+		dev,
+		"allocated rx completion desc ring: virt 0x%p phys 0x%llx\n",
+		chan->rx_dma_cdesc_virt, (u64)chan->rx_dma_cdesc);
+
+	rc = al_ssm_dma_handle_get(
+		&device->hal_crypto,
+		UDMA_TX,
+		&crypto_udma);
+	if (rc) {
+		dev_err(to_dev(chan), "al_crypto_dma_handle_get failed\n");
+		al_crypto_free_chan_resources(chan);
+		return rc;
+	}
+
+	conf.encode_64k_as_zero = true;
+	conf.max_pkt_size = 0xfffff;
+	al_udma_m2s_packet_size_cfg_set(crypto_udma, &conf);
+
+	tx_params.size = chan->tx_descs_num;
+	tx_params.desc_base = chan->tx_dma_desc_virt;
+	tx_params.desc_phy_base = chan->tx_dma_desc;
+	tx_params.cdesc_base = NULL; /* don't use Tx completion ring */
+	tx_params.cdesc_phy_base = 0;
+	tx_params.cdesc_size = AL_CRYPTO_TX_CDESC_SIZE; /* size is needed */
+
+	rx_params.size = chan->rx_descs_num;
+	rx_params.desc_base = chan->rx_dma_desc_virt;
+	rx_params.desc_phy_base = chan->rx_dma_desc;
+	rx_params.cdesc_base = chan->rx_dma_cdesc_virt;
+	rx_params.cdesc_phy_base = chan->rx_dma_cdesc;
+	rx_params.cdesc_size = AL_CRYPTO_RX_CDESC_SIZE;
+
+	/* alloc sw descriptors */
+	if (ring_alloc_order < AL_CRYPTO_SW_RING_MIN_ORDER) {
+		dev_err(
+			dev,
+			"%s: ring_alloc_order = %d < %d!\n",
+			__func__,
+			ring_alloc_order,
+			AL_CRYPTO_SW_RING_MIN_ORDER);
+
+		al_crypto_free_chan_resources(chan);
+		return -EINVAL;
+	} else if (ring_alloc_order > AL_CRYPTO_SW_RING_MAX_ORDER) {
+		dev_err(
+			dev,
+			"%s: ring_alloc_order = %d > %d!\n",
+			__func__,
+			ring_alloc_order,
+			AL_CRYPTO_SW_RING_MAX_ORDER);
+
+		al_crypto_free_chan_resources(chan);
+		return -EINVAL;
+	} else if (ring_alloc_order > rx_descs_order) {
+		dev_warn(
+			dev,
+			"%s: ring_alloc_order > rx_descs_order (%d>%d)!\n",
+			__func__,
+			ring_alloc_order,
+			rx_descs_order);
+
+	}
+
+	sw_ring = al_crypto_alloc_sw_ring(chan, ring_alloc_order, GFP_KERNEL);
+	if (!sw_ring) {
+		dev_err(
+			dev,
+			"%s: sw ring alloc failed! ring_alloc_order = %d\n",
+			__func__,
+			ring_alloc_order);
+
+		al_crypto_free_chan_resources(chan);
+		return -ENOMEM;
+	}
+
+	spin_lock_bh(&chan->cleanup_lock);
+	spin_lock_bh(&chan->prep_lock);
+	chan->sw_ring = sw_ring;
+	chan->head = 0;
+	chan->tail = 0;
+	chan->alloc_order = ring_alloc_order;
+	chan->type = al_crypto_is_crypt_auth_chan(chan) ?
+		AL_CRYPT_AUTH_Q : AL_MEM_CRC_MEMCPY_Q;
+
+	chan->cache_entries_num = al_crypto_is_crypt_auth_chan(chan) ?
+		(CACHED_SAD_SIZE / (device->num_channels -
+				device->crc_channels)) :
+		(CRC_IV_CACHE_SIZE / device->crc_channels);
+
+	rc = al_ssm_dma_q_init(&device->hal_crypto, chan->idx,
+			&tx_params, &rx_params, chan->type);
+	if (rc) {
+		dev_err(dev, "failed to initialize hal q %d. rc %d\n",
+			chan->idx, rc);
+
+		spin_unlock_bh(&chan->prep_lock);
+		spin_unlock_bh(&chan->cleanup_lock);
+		al_crypto_free_chan_resources(chan);
+		return -ENOMEM;
+	}
+
+	spin_unlock_bh(&chan->prep_lock);
+	spin_unlock_bh(&chan->cleanup_lock);
+
+	/* should we return less? */
+	return 1 << chan->alloc_order;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Free tx and rx descriptor rings for one channel
+ */
+static void al_crypto_free_chan_resources(struct al_crypto_chan *chan)
+{
+	struct device *dev = to_dev(chan);
+
+	dev_dbg(dev, "%s: idx=%d\n", __func__, chan->idx);
+
+	tasklet_disable(&chan->cleanup_task);
+
+	al_crypto_cleanup_fn(chan, 0);
+
+	spin_lock_bh(&chan->cleanup_lock);
+
+	al_crypto_free_sw_ring(chan->sw_ring, chan, 1 << chan->alloc_order);
+
+	if (chan->tx_dma_desc_virt != NULL) {
+		dma_free_coherent(dev,
+			chan->tx_descs_num * sizeof(union al_udma_desc),
+			chan->tx_dma_desc_virt, chan->tx_dma_desc);
+		chan->tx_dma_desc_virt = NULL;
+	}
+
+	if (chan->rx_dma_desc_virt != NULL) {
+		dma_free_coherent(
+			dev,
+			chan->rx_descs_num * sizeof(union al_udma_desc),
+			chan->rx_dma_desc_virt,
+			chan->rx_dma_desc);
+		chan->rx_dma_desc_virt = NULL;
+	}
+
+	if (chan->rx_dma_cdesc_virt != NULL) {
+		dma_free_coherent(dev, chan->rx_descs_num * AL_CRYPTO_RX_CDESC_SIZE,
+			chan->rx_dma_cdesc_virt, chan->rx_dma_cdesc);
+		chan->rx_dma_cdesc_virt = NULL;
+	}
+
+	spin_unlock_bh(&chan->cleanup_lock);
+
+	return;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Alloc sw descriptors ring
+ */
+static struct al_crypto_sw_desc **al_crypto_alloc_sw_ring(struct al_crypto_chan *chan,
+		int order, gfp_t flags)
+{
+	struct al_crypto_sw_desc **ring;
+	int descs = 1 << order;
+	int i;
+
+	/* allocate the array to hold the software ring */
+	ring = kcalloc(descs, sizeof(*ring), flags);
+	if (!ring)
+		return NULL;
+
+	for (i = 0; i < descs; i++) {
+		ring[i] = al_crypto_alloc_ring_ent(chan, flags);
+		if (!ring[i]) {
+			al_crypto_free_sw_ring(ring, chan, i);
+			return NULL;
+		}
+		/* set_desc_id(ring[i], i); */
+	}
+
+	return ring;
+}
+
+/******************************************************************************
+
*****************************************************************************/ +/* Free sw descriptors ring +*/ +static void al_crypto_free_sw_ring(struct al_crypto_sw_desc **ring, + struct al_crypto_chan *chan, + int size) +{ + int i; + + for (i = 0; i < size; i++) + al_crypto_free_ring_ent(ring[i], chan); + + kfree(ring); +} + +/****************************************************************************** + *****************************************************************************/ +/* Alloc sw descriptor + */ +static struct al_crypto_sw_desc *al_crypto_alloc_ring_ent(struct al_crypto_chan *chan, + gfp_t flags) +{ + struct al_crypto_sw_desc *desc; + + desc = kmem_cache_zalloc(chan->device->cache, flags); + if (!desc) + return NULL; + + return desc; +} + +/****************************************************************************** + *****************************************************************************/ +/* Free sw descriptor + */ +static void al_crypto_free_ring_ent(struct al_crypto_sw_desc *desc, + struct al_crypto_chan *chan) +{ + kmem_cache_free(chan->device->cache, desc); +} + +/****************************************************************************** + *****************************************************************************/ +/* Get sw desc + */ +int al_crypto_get_sw_desc(struct al_crypto_chan *chan, int num) +{ + dev_dbg(to_dev(chan), "%s: idx=%d num=%d ring_space=%d ring_size=%d\n", + __func__, chan->idx, num, al_crypto_ring_space(chan), + al_crypto_ring_size(chan)); + + if (likely(al_crypto_ring_space(chan) >= num)) { + dev_dbg(to_dev(chan), "%s: head=%x tail=%x\n", + __func__, chan->head, chan->tail); + return 0; + } + + return -ENOMEM; +} + +/****************************************************************************** + *****************************************************************************/ +/* Handler used for vector-per-channel interrupt mode + */ +static irqreturn_t al_crypto_do_interrupt_msix(int irq, void *data) +{ + struct al_crypto_chan *chan = data; + + dev_dbg(to_dev(chan), "%s: irq=%d idx=%d\n", + __func__, irq, chan->idx); + + tasklet_schedule(&chan->cleanup_task); + + return IRQ_HANDLED; +} + +/****************************************************************************** + *****************************************************************************/ +/* Handler for interrupt group d + */ +static irqreturn_t al_crypto_do_interrupt_group_d(int irq, void *data) +{ + struct al_crypto_device *device = data; + + dev_dbg(&device->pdev->dev, "%s: irq=%d\n", __func__, irq); + + al_crypto_group_d_errors_handler(device); + + return IRQ_HANDLED; +} + +/****************************************************************************** + *****************************************************************************/ +/* Handler used for vector-per-group interrupt mode + */ +static irqreturn_t al_crypto_do_interrupt_msix_rx(int irq, void *data) +{ + struct al_crypto_device *device = data; + + dev_dbg(&device->pdev->dev, "%s: irq=%d\n", __func__, irq); + + tasklet_schedule(&device->cleanup_task); + + return IRQ_HANDLED; +} + +/****************************************************************************** + *****************************************************************************/ +/* Handler used for legacy interrupt mode + */ +static irqreturn_t al_crypto_do_interrupt_legacy(int irq, void *data) +{ + struct al_crypto_device *device = data; + + dev_dbg(&device->pdev->dev, "%s: irq=%d\n", __func__, irq); + + al_udma_iofic_mask((struct unit_regs 
*)device->udma_regs_base,
+		AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_A,
+		AL_INT_GROUP_A_GROUP_B_SUM | AL_INT_GROUP_A_GROUP_D_SUM);
+
+	tasklet_schedule(&device->cleanup_task);
+
+	return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+int al_crypto_cleanup_fn(struct al_crypto_chan *chan, int from_tasklet)
+{
+	struct al_crypto_sw_desc *desc;
+	uint32_t comp_status;
+	u16 active;
+	int idx, i, rc;
+
+	spin_lock_bh(&chan->cleanup_lock);
+	idx = chan->tail;
+
+	active = al_crypto_ring_active(chan);
+
+	dev_dbg(to_dev(chan), "%s: idx=%d head=%#x tail=%#x from_tasklet=%d active=%d\n",
+		__func__, chan->idx, chan->head, chan->tail, from_tasklet, active);
+
+	AL_CRYPTO_STATS_SET(chan->stats_comp.max_active_descs,
+		(active > chan->stats_comp.max_active_descs) ?
+		active : chan->stats_comp.max_active_descs);
+
+	for (i = 0; i < active; i++) {
+		rc = al_crypto_dma_completion(chan->hal_crypto, chan->idx,
+				&comp_status);
+
+		/* if no completed transaction found -> exit */
+		if (rc == 0) {
+			dev_dbg(to_dev(chan), "%s: idx=%d no completion\n",
+				__func__, chan->idx);
+			break;
+		}
+
+		dev_dbg(to_dev(chan), "%s: idx=%d entry=%d comp_status=%u\n",
+			__func__, chan->idx, idx + i, comp_status);
+
+		/* This will instruct the CPU to make sure the index is up to
+		   date before reading the new item */
+		smp_read_barrier_depends();
+
+		desc = al_crypto_get_ring_ent(chan, idx + i);
+
+		if (desc->req_type == AL_CRYPTO_REQ_SKCIPHER)
+			al_crypto_skcipher_cleanup_single(chan, desc, comp_status);
+		else if (desc->req_type == AL_CRYPTO_REQ_AEAD)
+			al_crypto_aead_cleanup_single(chan, desc, comp_status);
+		else if (desc->req_type == AL_CRYPTO_REQ_AHASH)
+			al_crypto_cleanup_single_ahash(chan, desc, comp_status);
+		else if (desc->req_type == AL_CRYPTO_REQ_CRC)
+			al_crypto_cleanup_single_crc(chan, desc, comp_status);
+		else {
+			dev_err(to_dev(chan), "alg type %d is not supported\n",
+				desc->req_type);
+			BUG();
+		}
+	}
+
+	/* This will make sure the CPU has finished reading the item
+	   before it writes the new tail pointer, which will erase the item */
+	smp_mb(); /* finish all descriptor reads before incrementing tail */
+
+	chan->tail = idx + i;
+
+	dev_dbg(to_dev(chan), "%s: idx=%d head=%x tail=%x\n",
+		__func__, chan->idx, chan->head, chan->tail);
+
+	/* Keep track of redundant interrupts - interrupts that don't
+	   yield completions */
+	if (unlikely(from_tasklet && (!i)))
+		AL_CRYPTO_STATS_INC(chan->stats_comp.redundant_int_cnt, 1);
+
+	spin_unlock_bh(&chan->cleanup_lock);
+
+	/* Currently only skcipher reqs can be backlogged */
+	if (i && chan->sw_queue.qlen)
+		al_crypto_skcipher_process_queue(chan);
+
+	dev_dbg(to_dev(chan), "%s: idx=%d\n", __func__, chan->idx);
+
+	return i;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static void al_crypto_group_d_errors_handler(struct al_crypto_device *device)
+{
+	u32 read_cause_group_d, read_cause_crypto_reg_a;
+
+	read_cause_group_d = al_udma_iofic_read_cause(device->udma_regs_base,
+			AL_UDMA_IOFIC_LEVEL_PRIMARY,
+			AL_INT_GROUP_D);
+
+	dev_err(&device->pdev->dev,
+		"%s: got error - %08x from group D\n",
+		__func__, read_cause_group_d);
+
+	if (read_cause_group_d & AL_INT_GROUP_D_APP_EXT_INT) {
+		read_cause_crypto_reg_a =
+			al_iofic_read_cause(
+				device->crypto_regs_base +
+				AL_CRYPTO_APP_IOFIC_OFFSET,
+				AL_INT_GROUP_A);
+
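+		/* the APP IOFIC was configured clear-on-read, so the read
+		 * above also acked the cause; unmask the APP group and the
+		 * group D summary again so further errors keep interrupting
+		 */
+		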
al_iofic_unmask( + device->crypto_regs_base + AL_CRYPTO_APP_IOFIC_OFFSET, + AL_INT_GROUP_A, + AL_CRYPTO_APP_INT_A_ALL); + + al_udma_iofic_unmask( + (struct unit_regs *)device->udma_regs_base, + AL_UDMA_IOFIC_LEVEL_PRIMARY, + AL_INT_GROUP_A, + AL_INT_GROUP_A_GROUP_D_SUM); + + dev_err(&device->pdev->dev, + "got error - %08x from APP group A\n", + read_cause_crypto_reg_a); + } +} + +/****************************************************************************** + *****************************************************************************/ +static void al_crypto_cleanup_tasklet(unsigned long data) +{ + struct al_crypto_chan *chan = (struct al_crypto_chan *)data; + int num_completed; + + dev_dbg(to_dev(chan), "%s: idx=%d head=%x tail=%x sw_desc_num_locked=%d tx_desc_produced=%d\n", + __func__, chan->idx, chan->head, chan->tail, + chan->sw_desc_num_locked, chan->tx_desc_produced); + + num_completed = al_crypto_cleanup_fn(chan, 1); + + dev_dbg(to_dev(chan), "%s: idx=%d num_completed=%d\n", + __func__, chan->idx, num_completed); + + if (unlikely(num_completed < 0)) + dev_err(to_dev(chan), "al_crypto_cleanup_fn failed\n"); + + al_udma_iofic_unmask((struct unit_regs *)chan->device->udma_regs_base, + AL_UDMA_IOFIC_LEVEL_PRIMARY, + AL_INT_GROUP_B, + 1 << chan->idx); +} + +/****************************************************************************** + *****************************************************************************/ +static inline void al_crypto_cleanup_q_group_fn(struct al_crypto_device *device, + int group) +{ + int num_completed; + unsigned int status; + int i; + + status = al_udma_iofic_read_cause((struct unit_regs *)device->udma_regs_base, + AL_UDMA_IOFIC_LEVEL_PRIMARY, + group); + + for (i = 0; i < device->num_channels; i++) { + if (status & AL_BIT(i)) { + num_completed = al_crypto_cleanup_fn(device->channels[i], 1); + + if (unlikely(num_completed < 0)) + dev_err(to_dev(device->channels[i]), + "al_crypto_cleanup_fn failed\n"); + } + } +} + +/****************************************************************************** + *****************************************************************************/ +static void al_crypto_cleanup_tasklet_msix_rx(unsigned long data) +{ + struct al_crypto_device *device = (struct al_crypto_device *)data; + unsigned int status; + + status = al_udma_iofic_read_cause( + (struct unit_regs *)device->udma_regs_base, + AL_UDMA_IOFIC_LEVEL_PRIMARY, + AL_INT_GROUP_D); + + if (unlikely(status)) + al_crypto_group_d_errors_handler(device); + + al_crypto_cleanup_q_group_fn(device, AL_INT_GROUP_B); + + al_udma_iofic_unmask((struct unit_regs *)device->udma_regs_base, + AL_UDMA_IOFIC_LEVEL_PRIMARY, + AL_INT_GROUP_A, + AL_INT_GROUP_A_GROUP_B_SUM); +} + +/****************************************************************************** + *****************************************************************************/ +static void al_crypto_cleanup_tasklet_legacy(unsigned long data) +{ + struct al_crypto_device *device = (struct al_crypto_device *)data; + unsigned int status; + + status = al_udma_iofic_read_cause((struct unit_regs *)device->udma_regs_base, + AL_UDMA_IOFIC_LEVEL_PRIMARY, + AL_INT_GROUP_A); + + if (unlikely(status & AL_INT_GROUP_A_GROUP_D_SUM)) + al_crypto_group_d_errors_handler(device); + else if (status & AL_INT_GROUP_A_GROUP_B_SUM) + al_crypto_cleanup_q_group_fn(device, AL_INT_GROUP_B); + + al_udma_iofic_unmask((struct unit_regs *)device->udma_regs_base, + AL_UDMA_IOFIC_LEVEL_PRIMARY, + AL_INT_GROUP_A, + AL_INT_GROUP_A_GROUP_B_SUM); +} + 
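The cache helpers that follow maintain a software LRU over the device's limited on-chip SA / CRC-IV slots: a lookup hit rotates the entry to the most-recently-used end of the list, while a miss either claims a free slot or evicts the least-recently-used entry and hands its fixed hardware index to the new context. A minimal, self-contained userspace sketch of that replacement discipline (illustrative only, not part of the patch; all names and sizes are invented):

#include <stdio.h>
#include <string.h>

#define NSLOTS 4 /* stand-in for chan->cache_entries_num */

struct entry { int ctx, hw_idx; };

struct lru {
	struct entry e[NSLOTS]; /* e[0] is least, e[count-1] most recently used */
	int count;
};

/* Return the fixed hardware slot for ctx, evicting the LRU entry when full. */
static int lru_touch(struct lru *l, int ctx)
{
	struct entry cur;
	int i;

	for (i = 0; i < l->count; i++) {
		if (l->e[i].ctx != ctx)
			continue;
		cur = l->e[i]; /* hit: rotate to the MRU end, hw slot unchanged */
		memmove(&l->e[i], &l->e[i + 1], (l->count - i - 1) * sizeof(cur));
		l->e[l->count - 1] = cur;
		return cur.hw_idx;
	}
	if (l->count < NSLOTS) { /* miss with a free slot: claim the next hw index */
		cur.hw_idx = l->count;
		l->count++;
	} else { /* miss while full: evict e[0] and reuse its hw slot */
		cur.hw_idx = l->e[0].hw_idx;
		memmove(&l->e[0], &l->e[1], (NSLOTS - 1) * sizeof(cur));
	}
	cur.ctx = ctx;
	l->e[l->count - 1] = cur;
	return cur.hw_idx;
}

int main(void)
{
	struct lru l = { .count = 0 };
	int ids[] = { 1, 2, 3, 4, 1, 5 }; /* "5" evicts ctx 2, the LRU entry */
	unsigned int i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("ctx %d -> hw slot %d\n", ids[i], lru_touch(&l, ids[i]));
	return 0;
}

The driver's version below implements the same idea with the kernel list API, keeping a fixed cache_idx per list node and guarding the list with the channel's prep lock.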
+/******************************************************************************
+ *****************************************************************************/
+/* Update the LRU list according to the currently accessed entry
+ */
+void al_crypto_cache_update_lru(struct al_crypto_chan *chan,
+		struct al_crypto_cache_state *ctx)
+{
+	struct list_head *ptr;
+	struct al_crypto_cache_lru_entry *lru_entry = NULL;
+	uint32_t list_idx = 0;
+
+	/* skip update if cache not yet populated */
+	if (unlikely(chan->cache_lru_count <= 1))
+		return;
+
+	list_for_each(ptr, &chan->cache_lru_list) {
+		lru_entry = list_entry(ptr,
+				struct al_crypto_cache_lru_entry,
+				list);
+		if (lru_entry->ctx == ctx)
+			break;
+		list_idx++;
+	}
+
+	/* The entry has to be in the list */
+	BUG_ON(lru_entry->ctx != ctx);
+
+	/* move to tail only if needed */
+	if (list_idx != (chan->cache_lru_count - 1))
+		list_move_tail(ptr, &chan->cache_lru_list);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Translate cache entry index in ring[0..3] to global index: [0..15] in the
+ * sa cache, [0..7] in the crc iv cache
+ */
+static inline uint32_t al_crypto_ring_cache_idx(struct al_crypto_chan *chan, int cache_idx)
+{
+	struct al_crypto_device *device = chan->device;
+	int chan_idx = chan->idx;
+
+	if (chan->idx >= (device->num_channels - device->crc_channels))
+		chan_idx = chan->idx -
+			(device->num_channels - device->crc_channels);
+
+	return (chan_idx * chan->cache_entries_num) + cache_idx;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Replace least recently used cache entry with current entry
+ */
+uint32_t al_crypto_cache_replace_lru(struct al_crypto_chan *chan,
+		struct al_crypto_cache_state *ctx,
+		struct al_crypto_cache_state **old_ctx)
+{
+	struct al_crypto_cache_lru_entry *lru_entry = NULL;
+
+	if (chan->cache_lru_count < chan->cache_entries_num) {
+		/* find a free entry */
+		int i;
+		for (i = 0; i < chan->cache_entries_num; i++) {
+			lru_entry = &chan->cache_lru_entries[i];
+			if (lru_entry->ctx && lru_entry->ctx->cached)
+				continue;
+			else
+				break;
+		}
+
+		BUG_ON(!lru_entry);
+		BUG_ON(i >= chan->cache_entries_num);
+
+		lru_entry->cache_idx =
+			al_crypto_ring_cache_idx(chan, i);
+		lru_entry->ctx = ctx;
+
+		list_add_tail(&lru_entry->list,
+			&chan->cache_lru_list);
+		chan->cache_lru_count++;
+		if (old_ctx)
+			*old_ctx = NULL;
+	} else {
+		AL_CRYPTO_STATS_INC(chan->stats_prep.cache_misses, 1);
+		lru_entry = list_first_entry(&chan->cache_lru_list,
+				struct al_crypto_cache_lru_entry,
+				list);
+		/* Invalidate old ctx */
+		lru_entry->ctx->cached = false;
+		/* Return old ctx if needed */
+		if (old_ctx)
+			*old_ctx = lru_entry->ctx;
+		/* Connect new ctx */
+		lru_entry->ctx = ctx;
+		/* Move current entry to end of LRU list */
+		list_rotate_left(&chan->cache_lru_list);
+	}
+
+	ctx->cached = true;
+	ctx->idx = lru_entry->cache_idx;
+	return lru_entry->cache_idx;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Remove the entry from LRU list
+ */
+void al_crypto_cache_remove_lru(struct al_crypto_chan *chan,
+		struct al_crypto_cache_state *ctx)
+{
+	struct list_head *ptr;
+	struct al_crypto_cache_lru_entry *lru_entry = NULL;
+	uint32_t list_idx = 0;
+
+	/* lru list is empty */
+	if 
(chan->cache_lru_count == 0)
+		return;
+
+	list_for_each(ptr, &chan->cache_lru_list) {
+		lru_entry = list_entry(ptr,
+				struct al_crypto_cache_lru_entry,
+				list);
+		if (lru_entry->ctx == ctx)
+			break;
+		list_idx++;
+	}
+
+	/* The entry has to be in the list */
+	BUG_ON(lru_entry->ctx != ctx);
+
+	list_del(ptr);
+	lru_entry->ctx = NULL;
+	chan->cache_lru_count--;
+	ctx->cached = false;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Move ring head to hand prepared descriptors to the HW
+ */
+void al_crypto_tx_submit(struct al_crypto_chan *chan)
+{
+	dev_dbg(to_dev(chan), "%s: idx=%d head=%x tail=%x sw_desc_num_locked=%d tx_desc_produced=%d\n",
+		__func__, chan->idx, chan->head, chan->tail,
+		chan->sw_desc_num_locked, chan->tx_desc_produced);
+
+	/* according to Documentation/circular-buffers.txt we should have */
+	/* smp_wmb before incrementing the head, however, the */
+	/* al_crypto_dma_action contains writel() which implies dmb on ARM */
+	/* so this smp_wmb() can be omitted on ARM platforms */
+	/*smp_wmb();*/ /* commit the item before incrementing the head */
+	chan->head += chan->sw_desc_num_locked;
+	/* in our case the consumer (interrupt handler) will be woken up by */
+	/* the hw, so we send the transaction to the hw after incrementing */
+	/* the head */
+
+	al_crypto_dma_action(chan->hal_crypto,
+			chan->idx, chan->tx_desc_produced);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Set interrupt moderation interval, each tick ~= 1.5usecs
+ */
+void al_crypto_set_int_moderation(struct al_crypto_device *device, int interval)
+{
+	int i;
+
+	for (i = 0; i < device->num_channels; i++)
+		al_iofic_msix_moder_interval_config(
+			&((struct unit_regs *)(device->udma_regs_base))->gen.
+ interrupt_regs.main_iofic, + AL_INT_GROUP_B, + i, + interval); + + device->int_moderation = interval; +} + +/****************************************************************************** + *****************************************************************************/ +/* Get interrupt moderation interval + */ +int al_crypto_get_int_moderation(struct al_crypto_device *device) +{ + return device->int_moderation; +} + +/****************************************************************************** + *****************************************************************************/ +int al_crypto_keylen_to_sa_aes_ksize(unsigned int keylen, + enum al_crypto_sa_aes_ksize *ksize) +{ + switch (keylen) { + case 16: /* 128 bit */ + *ksize = AL_CRYPT_AES_128; + return 0; + case 24: /* 192 bit */ + *ksize = AL_CRYPT_AES_192; + return 0; + case 32: /* 256 bit */ + *ksize = AL_CRYPT_AES_256; + return 0; + default: /* Invalid key size */ + return 1; + } +} + +/****************************************************************************** + *****************************************************************************/ +int al_crypto_sa_aes_ksize_to_keylen(enum al_crypto_sa_aes_ksize ksize, + unsigned int *keylen) +{ + switch (ksize) { + case AL_CRYPT_AES_128: + *keylen = 16; + return 0; + case AL_CRYPT_AES_192: + *keylen = 24; + return 0; + case AL_CRYPT_AES_256: + *keylen = 32; + return 0; + default: + return 1; + } +} + +/****************************************************************************** + *****************************************************************************/ +void al_crypto_hexdump_sgl(const char *level, struct scatterlist *sgl, + const char *name, off_t skip, int len, gfp_t gfp_flags) +{ + char prefix[64]; + void *buf; + + buf = kmalloc(len, gfp_flags); + if (!buf) + return; + + snprintf(prefix, sizeof(prefix), KBUILD_MODNAME ": %s: ", name); + scatterwalk_map_and_copy(buf, sgl, skip, len, 0); + print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, buf, len, false); + + kfree(buf); +} diff --git a/target/linux/alpine/files/drivers/crypto/al/al_crypto_crc.c b/target/linux/alpine/files/drivers/crypto/al/al_crypto_crc.c new file mode 100644 index 00000000000000..3657c229aaffb5 --- /dev/null +++ b/target/linux/alpine/files/drivers/crypto/al/al_crypto_crc.c @@ -0,0 +1,615 @@ +/* + * drivers/crypto/al_crypto_crc.c + * + * Annapurna Labs Crypto driver - crc/checksum algorithms + * + * Copyright (C) 2013 Annapurna Labs Ltd. + * + * Algorithm registration code and chained scatter/gather lists + * handling based on caam driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "linux/export.h" +#include "linux/crypto.h" +#include +#include +#include +#include + +#include +#include + +#include "al_crypto.h" + +#define AL_CRYPTO_CRA_PRIORITY 300 + +static int crc_init(struct ahash_request *req); + +static int crc_update(struct ahash_request *req); + +static int crc_final(struct ahash_request *req); + +static int crc_finup(struct ahash_request *req); + +static int crc_digest(struct ahash_request *req); + +static int crc_export(struct ahash_request *req, void *out); + +static int crc_import(struct ahash_request *req, const void *in); + +static int crc_setkey(struct crypto_ahash *ahash, + const u8 *key, unsigned int keylen); + +struct al_crc_req_ctx { + /* Make sure the following field isn't share the same cache line + * with other fields. + * This field is DMAed */ + uint32_t result ____cacheline_aligned; + bool last ____cacheline_aligned; + struct al_crypto_cache_state cache_state; + dma_addr_t crc_dma_addr; +}; + +struct al_crc_ctx { + struct al_crypto_chan *chan; + enum al_crc_checksum_type crcsum_type; + uint32_t key; +}; + +struct al_crc_template { + char name[CRYPTO_MAX_ALG_NAME]; + char driver_name[CRYPTO_MAX_ALG_NAME]; + unsigned int blocksize; + struct ahash_alg template_ahash; + enum al_crc_checksum_type crcsum_type; +}; + +static struct al_crc_template driver_crc[] = { + { + .name = "crc32c", + .driver_name = "crc32c-al", + .blocksize = CHKSUM_BLOCK_SIZE, + .template_ahash = { + .init = crc_init, + .update = crc_update, + .final = crc_final, + .finup = crc_finup, + .digest = crc_digest, + .export = crc_export, + .import = crc_import, + .setkey = crc_setkey, + .halg = { + .digestsize = CHKSUM_DIGEST_SIZE, + .statesize = sizeof(struct al_crc_req_ctx), + }, + }, + .crcsum_type = AL_CRC_CHECKSUM_CRC32C, + }, +}; + +struct al_crc { + struct list_head entry; + struct al_crypto_device *device; + enum al_crc_checksum_type crcsum_type; + struct ahash_alg ahash_alg; +}; + +/****************************************************************************** + *****************************************************************************/ +static int al_crc_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_alg *base = tfm->__crt_alg; + struct hash_alg_common *halg = + container_of(base, struct hash_alg_common, base); + struct ahash_alg *alg = + container_of(halg, struct ahash_alg, halg); + struct al_crc *al_crc = + container_of(alg, struct al_crc, ahash_alg); + struct al_crc_ctx *ctx = crypto_tfm_ctx(tfm); + struct al_crypto_device *device = al_crc->device; + int chan_idx = (atomic_inc_return(&device->crc_tfm_count) % + device->crc_channels) + + (device->num_channels - device->crc_channels); + + ctx->chan = device->channels[chan_idx]; + + ctx->crcsum_type = al_crc->crcsum_type; + + ctx->key = 0; + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct al_crc_req_ctx)); + + AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock); + AL_CRYPTO_STATS_INC(ctx->chan->stats_gen.crc_tfms, 1); + AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock); + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +static void al_crc_cra_exit(struct crypto_tfm *tfm) +{ + struct al_crc_ctx 
*ctx = crypto_tfm_ctx(tfm); + + AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock); + AL_CRYPTO_STATS_DEC(ctx->chan->stats_gen.crc_tfms, 1); + AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock); + + return; +} + +/****************************************************************************** + *****************************************************************************/ +/* DMA unmap buffers for crc request + */ +static inline void al_crypto_dma_unmap_crc( + struct al_crypto_chan *chan, + struct al_crypto_sw_desc *desc) +{ + struct ahash_request *req = + (struct ahash_request *)desc->req; + struct al_crc_req_ctx *req_ctx = + ahash_request_ctx(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + unsigned int digestsize = crypto_ahash_digestsize(ahash); + + if (desc->src_nents) + dma_unmap_sg(to_dev(chan), + req->src, + desc->src_nents, + DMA_TO_DEVICE); + + if (req_ctx->last) { + dma_unmap_single(to_dev(chan), + req_ctx->crc_dma_addr, + digestsize, + DMA_BIDIRECTIONAL); + put_unaligned_le32(req_ctx->result, req->result); + } +} + +/****************************************************************************** + *****************************************************************************/ +/* Cleanup single crc request - invoked from cleanup tasklet (interrupt + * handler) + */ +void al_crypto_cleanup_single_crc( + struct al_crypto_chan *chan, + struct al_crypto_sw_desc *desc, + uint32_t comp_status) +{ + struct ahash_request *req = + (struct ahash_request *)desc->req; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct al_crc_ctx *ctx = crypto_ahash_ctx(ahash); + struct al_crc_req_ctx *req_ctx = ahash_request_ctx(req); + + al_crypto_dma_unmap_crc(chan, desc); + + /* LRU list access has to be protected */ + if (req_ctx->last) { + spin_lock(&ctx->chan->prep_lock); + if (req_ctx->cache_state.cached) + al_crypto_cache_remove_lru(chan, &req_ctx->cache_state); + spin_unlock(&ctx->chan->prep_lock); + } + + req->base.complete(&req->base, 0); +} + +/****************************************************************************** + *****************************************************************************/ +static int crc_init(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct al_crc_ctx *ctx = crypto_ahash_ctx(ahash); + struct al_crc_req_ctx *req_ctx = ahash_request_ctx(req); + struct al_crypto_chan *chan = ctx->chan; + unsigned int digestsize = crypto_ahash_digestsize(ahash); + + req_ctx->last = false; + req_ctx->cache_state.cached = false; + + put_unaligned_le32(ctx->key, &req_ctx->result); + + req_ctx->crc_dma_addr = dma_map_single(to_dev(chan), + &req_ctx->result, + digestsize, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(to_dev(chan), req_ctx->crc_dma_addr)) { + dev_err(to_dev(chan), "dma_map_single failed\n"); + return -ENOMEM; + } + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +static inline void crc_req_prepare_xaction_buffers(struct ahash_request *req, + struct al_crypto_sw_desc *desc, + int nbytes, int src_nents, + int *src_idx) +{ + *src_idx = 0; + + if (src_nents) + sg_map_to_xaction_buffers(req->src, desc->src_bufs, nbytes, + src_idx); +} + +/****************************************************************************** + *****************************************************************************/ +static inline void crc_update_stats(int nbytes, + struct al_crypto_chan *chan) +{ + 
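/* bucket request sizes (<= 512, 513..2048, 2049..4096, > 4096 bytes)
+	 * for the prep-path statistics counters
+	 */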
+	AL_CRYPTO_STATS_INC(chan->stats_prep.crc_reqs, 1);
+	AL_CRYPTO_STATS_INC(chan->stats_prep.crc_bytes, nbytes);
+
+	if (nbytes <= 512)
+		AL_CRYPTO_STATS_INC(chan->stats_prep.crc_reqs_le512, 1);
+	else if ((nbytes > 512) && (nbytes <= 2048))
+		AL_CRYPTO_STATS_INC(chan->stats_prep.crc_reqs_512_2048, 1);
+	else if ((nbytes > 2048) && (nbytes <= 4096))
+		AL_CRYPTO_STATS_INC(chan->stats_prep.crc_reqs_2048_4096, 1);
+	else
+		AL_CRYPTO_STATS_INC(chan->stats_prep.crc_reqs_gt4096, 1);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static inline void crc_req_prepare_xaction(struct ahash_request *req,
+		int nbytes,
+		struct al_crypto_sw_desc *desc,
+		int src_nents)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct al_crc_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct al_crc_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct al_crypto_chan *chan = ctx->chan;
+	struct al_crc_transaction *xaction;
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	int src_idx;
+
+	/* prepare hal transaction */
+	xaction = &desc->hal_crc_xaction;
+	memset(xaction, 0, sizeof(struct al_crc_transaction));
+	xaction->crcsum_type = ctx->crcsum_type;
+	xaction->xor_valid = AL_TRUE;
+	xaction->in_xor = ~0;
+	xaction->res_xor = ~0;
+
+	/* if the entry is not cached, take stored iv */
+	if (!(req_ctx->cache_state.cached)) {
+		xaction->crc_iv_in.addr = req_ctx->crc_dma_addr;
+		xaction->crc_iv_in.len = digestsize;
+	}
+
+	/* both store in cache and output intermediate result */
+	/* cached result will be used unless it will be replaced */
+	xaction->crc_out.addr = req_ctx->crc_dma_addr;
+	xaction->crc_out.len = digestsize;
+
+	if (likely(!req_ctx->last)) {
+		xaction->st_crc_out = AL_TRUE;
+
+		if (!req_ctx->cache_state.cached) {
+			xaction->cached_crc_indx = al_crypto_cache_replace_lru(
+							chan, &req_ctx->cache_state,
+							NULL);
+			xaction->flags = AL_SSM_BARRIER;
+		} else {
+			al_crypto_cache_update_lru(chan, &req_ctx->cache_state);
+			xaction->cached_crc_indx = req_ctx->cache_state.idx;
+		}
+	}
+
+	crc_req_prepare_xaction_buffers(req, desc, nbytes, src_nents,
+			&src_idx);
+
+	xaction->src.bufs = &desc->src_bufs[0];
+	xaction->src.num = src_idx;
+
+	dev_dbg(to_dev(chan),
+		"%s: req_ctx->cache_state.cached=%d\n",
+		__func__, req_ctx->cache_state.cached);
+
+	/* OR rather than assign, so the barrier flag set above is kept */
+	xaction->flags |= AL_SSM_INTERRUPT;
+
+	crc_update_stats(nbytes, chan);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Main CRC processing function that handles update/final/finup and digest
+ *
+ * Implementation is based on the assumption that the caller waits for
+ * completion of every operation before issuing the next operation
+ */
+static int crc_process_req(struct ahash_request *req, unsigned int nbytes)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct al_crc_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct al_crc_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct al_crypto_chan *chan = ctx->chan;
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+
+	int idx;
+	int src_nents = 0;
+	struct al_crypto_sw_desc *desc;
+	int rc = 0;
+
+	dev_dbg(to_dev(chan),
+		"%s: nbytes=%d, last=%d\n",
+		__func__, nbytes, req_ctx->last);
+
+	if (nbytes) {
+		src_nents = sg_nents_for_len(req->src, nbytes);
+
+		dev_dbg(to_dev(chan), "%s: src_nents=%d\n", __func__,
+			src_nents);
+
+		dma_map_sg(to_dev(chan),
req->src, src_nents, + DMA_TO_DEVICE); + + spin_lock_bh(&chan->prep_lock); + if (likely(al_crypto_get_sw_desc(chan, 1) == 0)) + idx = chan->head; + else { + spin_unlock_bh(&chan->prep_lock); + dev_err( + to_dev(chan), + "%s: al_crypto_get_sw_desc failed!\n", + __func__); + + if (src_nents) + dma_unmap_sg(to_dev(chan), req->src, src_nents, + DMA_TO_DEVICE); + + return -ENOSPC; + } + + chan->sw_desc_num_locked = 1; + chan->tx_desc_produced = 0; + + desc = al_crypto_get_ring_ent(chan, idx); + desc->req = (void *)req; + desc->req_type = AL_CRYPTO_REQ_CRC; + desc->src_nents = src_nents; + + crc_req_prepare_xaction(req, nbytes, desc, src_nents); + + /* send crypto transaction to engine */ + rc = al_crc_csum_prepare(chan->hal_crypto, chan->idx, + &desc->hal_crc_xaction); + if (unlikely(rc != 0)) { + dev_err(to_dev(chan), + "al_crypto_dma_prepare failed!\n"); + + al_crypto_dma_unmap_crc(chan, desc); + + spin_unlock_bh(&chan->prep_lock); + return rc; + } + + chan->tx_desc_produced += desc->hal_crc_xaction.tx_descs_count; + + al_crypto_tx_submit(chan); + + spin_unlock_bh(&chan->prep_lock); + + return -EINPROGRESS; + } else if (likely(req_ctx->last)) { + dma_unmap_single(to_dev(chan), req_ctx->crc_dma_addr, + digestsize, DMA_BIDIRECTIONAL); + put_unaligned_le32(req_ctx->result, req->result); + + spin_lock_bh(&ctx->chan->prep_lock); + if (req_ctx->cache_state.cached) + al_crypto_cache_remove_lru(chan, &req_ctx->cache_state); + spin_unlock_bh(&ctx->chan->prep_lock); + } + + return rc; +} + +/****************************************************************************** + *****************************************************************************/ +static int crc_update(struct ahash_request *req) +{ + struct al_crc_req_ctx *req_ctx = ahash_request_ctx(req); + + req_ctx->last = false; + + return crc_process_req(req, req->nbytes); +} + +/****************************************************************************** + *****************************************************************************/ +static int crc_final(struct ahash_request *req) +{ + struct al_crc_req_ctx *req_ctx = ahash_request_ctx(req); + + req_ctx->last = true; + + return crc_process_req(req, 0); +} + +/****************************************************************************** + *****************************************************************************/ +static int crc_finup(struct ahash_request *req) +{ + struct al_crc_req_ctx *req_ctx = ahash_request_ctx(req); + + req_ctx->last = true; + + return crc_process_req(req, req->nbytes); +} + +/****************************************************************************** + *****************************************************************************/ +static int crc_digest(struct ahash_request *req) +{ + struct al_crc_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + + ahash->init(req); + req_ctx->last = true; + + return crc_process_req(req, req->nbytes); +} + +/****************************************************************************** + *****************************************************************************/ +static int crc_export(struct ahash_request *req, void *out) +{ + struct al_crc_req_ctx *state = ahash_request_ctx(req); + + memcpy(out, state, sizeof(struct al_crc_req_ctx)); + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +static int crc_import(struct ahash_request *req, const void 
*in) +{ + struct al_crc_req_ctx *state = ahash_request_ctx(req); + + memcpy(state, in, sizeof(struct al_crc_req_ctx)); + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +static int crc_setkey(struct crypto_ahash *ahash, const u8 *key, + unsigned int keylen) +{ + struct al_crc_ctx *ctx = crypto_ahash_ctx(ahash); + + if (keylen != sizeof(ctx->key)) + return -EINVAL; + + ctx->key = ~get_unaligned_le32(key); + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +static struct al_crc *al_crc_alloc( + struct al_crypto_device *device, + struct al_crc_template *template) +{ + struct al_crc *t_alg; + struct ahash_alg *halg; + struct crypto_alg *alg; + + t_alg = kzalloc(sizeof(struct al_crc), GFP_KERNEL); + if (!t_alg) { + dev_err(&device->pdev->dev, "failed to allocate t_alg\n"); + return ERR_PTR(-ENOMEM); + } + + t_alg->ahash_alg = template->template_ahash; + halg = &t_alg->ahash_alg; + alg = &halg->halg.base; + + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", + template->name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + template->driver_name); + + alg->cra_module = THIS_MODULE; + alg->cra_init = al_crc_cra_init; + alg->cra_exit = al_crc_cra_exit; + alg->cra_priority = AL_CRYPTO_CRA_PRIORITY; + alg->cra_blocksize = template->blocksize; + alg->cra_alignmask = 0; + alg->cra_ctxsize = sizeof(struct al_crc_ctx); + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_OPTIONAL_KEY; + + t_alg->crcsum_type = template->crcsum_type; + t_alg->device = device; + + return t_alg; +} + +/****************************************************************************** + *****************************************************************************/ +int al_crypto_crc_init(struct al_crypto_device *device) +{ + int i; + int err = 0; + + INIT_LIST_HEAD(&device->crc_list); + + if (!device->crc_channels) + return 0; + + atomic_set(&device->crc_tfm_count, -1); + + /* register crypto algorithms the device supports */ + for (i = 0; i < ARRAY_SIZE(driver_crc); i++) { + struct al_crc *t_alg; + + t_alg = al_crc_alloc(device, &driver_crc[i]); + if (IS_ERR(t_alg)) { + err = PTR_ERR(t_alg); + dev_warn(&device->pdev->dev, + "%s alg allocation failed\n", + driver_crc[i].driver_name); + continue; + } + + err = crypto_register_ahash(&t_alg->ahash_alg); + if (err) { + dev_warn(&device->pdev->dev, + "%s alg registration failed\n", + t_alg->ahash_alg.halg.base.cra_driver_name); + kfree(t_alg); + } else + list_add_tail(&t_alg->entry, &device->crc_list); + } + + if (!list_empty(&device->crc_list)) + dev_info(&device->pdev->dev, + "crc/csum algorithms registered in /proc/crypto\n"); + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +void al_crypto_crc_terminate(struct al_crypto_device *device) +{ + struct al_crc *t_alg, *n; + + if (!device->crc_list.next) + return; + + list_for_each_entry_safe(t_alg, n, &device->crc_list, entry) { + crypto_unregister_ahash(&t_alg->ahash_alg); + list_del(&t_alg->entry); + kfree(t_alg); + } +} diff --git a/target/linux/alpine/files/drivers/crypto/al/al_crypto_hash.c b/target/linux/alpine/files/drivers/crypto/al/al_crypto_hash.c new file mode 100644 index 00000000000000..69691682a1582e --- /dev/null +++ 
b/target/linux/alpine/files/drivers/crypto/al/al_crypto_hash.c @@ -0,0 +1,1193 @@ +/* + * drivers/crypto/al_crypto_hash.c + * + * Annapurna Labs Crypto driver - hash algorithms + * + * Copyright (C) 2012 Annapurna Labs Ltd. + * + * Algorithm registration code and chained scatter/gather lists + * handling based on caam driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "linux/export.h" +#include "linux/crypto.h" +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "al_crypto.h" + +#define AL_CRYPTO_CRA_PRIORITY 300 + +#define AL_CRYPTO_HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE +#define AL_CRYPTO_HASH_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE + + +static int ahash_init(struct ahash_request *req); + +static int ahash_update(struct ahash_request *req); + +static int ahash_final(struct ahash_request *req); + +static int ahash_finup(struct ahash_request *req); + +static int ahash_digest(struct ahash_request *req); + +static int ahash_export(struct ahash_request *req, void *out); + +static int ahash_import(struct ahash_request *req, const void *in); + +static int ahash_setkey(struct crypto_ahash *ahash, + const u8 *key, unsigned int keylen); + +/* ahash request ctx */ +struct al_crypto_hash_req_ctx { + /* Note 1: + * buf_0 and buf_1 are used for keeping the data that + * was not hashed during current update for the next update + * Note 2: + * buf_0, buf_1 and interm are DMAed so they shouldn't + * share the same cache line + * with other fields + * */ + uint8_t buf_0[AL_CRYPTO_HASH_MAX_BLOCK_SIZE] ____cacheline_aligned; + uint8_t buf_1[AL_CRYPTO_HASH_MAX_BLOCK_SIZE] ____cacheline_aligned; + /* intermediate state */ + uint8_t interm[AL_CRYPTO_HASH_MAX_DIGEST_SIZE] ____cacheline_aligned; + int buflen_0 ____cacheline_aligned; + int buflen_1; + uint8_t current_buf; /* select active buffer for current update */ + dma_addr_t buf_dma_addr; + int buf_dma_len; + dma_addr_t interm_dma_addr; + bool first; + bool last; + uint32_t hashed_len; +}; + +struct al_crypto_hash_template { + char name[CRYPTO_MAX_ALG_NAME]; + char driver_name[CRYPTO_MAX_ALG_NAME]; + char hmac_name[CRYPTO_MAX_ALG_NAME]; + char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; + unsigned int blocksize; + struct ahash_alg template_ahash; + enum al_crypto_sa_auth_type auth_type; + enum al_crypto_sa_sha2_mode sha2_mode; + enum al_crypto_sa_op sa_op; + char sw_hash_name[CRYPTO_MAX_ALG_NAME]; + unsigned int sw_hash_interm_offset; + unsigned int sw_hash_interm_size; +}; + +static struct al_crypto_hash_template driver_hash[] = { + { + .name = "sha1", + .driver_name = "sha1-al", + .hmac_name = "hmac(sha1)", + .hmac_driver_name = "hmac-sha1-al", + .blocksize = SHA1_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, 
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA1_DIGEST_SIZE,
+				.statesize = sizeof(struct al_crypto_hash_req_ctx),
+			},
+		},
+		.auth_type = AL_CRYPT_AUTH_SHA1,
+		.sha2_mode = 0,
+		.sa_op = AL_CRYPT_AUTH_ONLY,
+		.sw_hash_name = "sha1",
+		.sw_hash_interm_offset = offsetof(struct sha1_state, state),
+		.sw_hash_interm_size = sizeof(((struct sha1_state *)0)->state),
+	},
+	{
+		.name = "sha256",
+		.driver_name = "sha256-al",
+		.hmac_name = "hmac(sha256)",
+		.hmac_driver_name = "hmac-sha256-al",
+		.blocksize = SHA256_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA256_DIGEST_SIZE,
+				.statesize = sizeof(struct al_crypto_hash_req_ctx),
+			},
+		},
+		.auth_type = AL_CRYPT_AUTH_SHA2,
+		.sha2_mode = AL_CRYPT_SHA2_256,
+		.sa_op = AL_CRYPT_AUTH_ONLY,
+		.sw_hash_name = "sha256",
+		.sw_hash_interm_offset = offsetof(struct sha256_state, state),
+		.sw_hash_interm_size = sizeof(((struct sha256_state *)0)->state),
+	},
+	{
+		.name = "sha512",
+		.driver_name = "sha512-al",
+		.hmac_name = "hmac(sha512)",
+		.hmac_driver_name = "hmac-sha512-al",
+		.blocksize = SHA512_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA512_DIGEST_SIZE,
+				.statesize = sizeof(struct al_crypto_hash_req_ctx),
+			},
+		},
+		.auth_type = AL_CRYPT_AUTH_SHA2,
+		.sha2_mode = AL_CRYPT_SHA2_512,
+		.sa_op = AL_CRYPT_AUTH_ONLY,
+		.sw_hash_name = "sha512",
+		.sw_hash_interm_offset = offsetof(struct sha512_state, state),
+		.sw_hash_interm_size = sizeof(((struct sha512_state *)0)->state),
+	},
+	{
+		.name = "sha384",
+		.driver_name = "sha384-al",
+		.hmac_name = "hmac(sha384)",
+		.hmac_driver_name = "hmac-sha384-al",
+		.blocksize = SHA384_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA384_DIGEST_SIZE,
+				.statesize = sizeof(struct al_crypto_hash_req_ctx),
+			},
+		},
+		.auth_type = AL_CRYPT_AUTH_SHA2,
+		.sha2_mode = AL_CRYPT_SHA2_384,
+		.sa_op = AL_CRYPT_AUTH_ONLY,
+		.sw_hash_name = "sha384",
+		.sw_hash_interm_offset = offsetof(struct sha512_state, state),
+		.sw_hash_interm_size = sizeof(((struct sha512_state *)0)->state),
+	},
+	{
+		.name = "md5",
+		.driver_name = "md5-al",
+		.hmac_name
= "hmac(md5)", + .hmac_driver_name = "hmac-md5-al", + .blocksize = MD5_HMAC_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct al_crypto_hash_req_ctx), + }, + }, + .auth_type = AL_CRYPT_AUTH_MD5, + .sha2_mode = 0, + .sa_op = AL_CRYPT_AUTH_ONLY, + .sw_hash_name = "md5", + .sw_hash_interm_offset = 0, + .sw_hash_interm_size = sizeof(struct md5_state), + }, +}; + +struct al_crypto_hash { + struct list_head entry; + struct al_crypto_device *device; + enum al_crypto_sa_auth_type auth_type; + enum al_crypto_sa_sha2_mode sha2_mode; + enum al_crypto_sa_op sa_op; + struct ahash_alg ahash_alg; + char sw_hash_name[CRYPTO_MAX_ALG_NAME]; + unsigned int sw_hash_interm_offset; + unsigned int sw_hash_interm_size; +}; + +/****************************************************************************** + *****************************************************************************/ +static u8 zero_message_hash_md5[MD5_DIGEST_SIZE] = { + 0xd4, 0x1d, 0x8c, 0xd9, 0x8f ,0x00 ,0xb2, 0x04, + 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e +}; + +static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { + 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, + 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, + 0xaf, 0xd8, 0x07, 0x09 +}; + +static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, + 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, + 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, + 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 +}; + +static u8 zero_message_hash_sha384[SHA384_DIGEST_SIZE] = { + 0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38, + 0x4c, 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a, + 0x21, 0xfd, 0xb7, 0x11, 0x14, 0xbe, 0x07, 0x43, + 0x4c, 0x0c, 0xc7, 0xbf, 0x63, 0xf6, 0xe1, 0xda, + 0x27, 0x4e, 0xde, 0xbf, 0xe7, 0x6f, 0x65, 0xfb, + 0xd5, 0x1a, 0xd2, 0xf1, 0x48, 0x98, 0xb9, 0x5b, +}; + +static u8 zero_message_hash_sha512[SHA512_DIGEST_SIZE] = { + 0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd, + 0xf1, 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07, + 0xd6, 0x20, 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc, + 0x83, 0xf4, 0xa9, 0x21, 0xd3, 0x6c, 0xe9, 0xce, + 0x47, 0xd0, 0xd1, 0x3c, 0x5d, 0x85, 0xf2, 0xb0, + 0xff, 0x83, 0x18, 0xd2, 0x87, 0x7e, 0xec, 0x2f, + 0x63, 0xb9, 0x31, 0xbd, 0x47, 0x41, 0x7a, 0x81, + 0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, 0x3e, +}; + +/****************************************************************************** + *****************************************************************************/ +static inline int to_signature_size(int digest_size) +{ + return (digest_size / 4) - 1; +} + +/****************************************************************************** + *****************************************************************************/ +static int al_crypto_hash_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + struct al_crypto_hash *al_crypto_hash = container_of(alg, struct al_crypto_hash, ahash_alg); + struct al_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct al_crypto_device *device = al_crypto_hash->device; + int chan_idx = atomic_inc_return(&device->tfm_count) % + (device->num_channels - device->crc_channels); + struct crypto_shash *sw_hash = NULL; + + 
dev_dbg(&device->pdev->dev, "%s: cra_name=%s\n", + __func__, tfm->__crt_alg->cra_name); + + memset(&ctx->sa, 0, sizeof(struct al_crypto_sa)); + + /* Allocate SW hash for hmac long key hashing and key XOR ipad/opad + * intermediate calculations + */ + if (strlen(al_crypto_hash->sw_hash_name)) { + /* TODO: is CRYPTO_ALG_NEED_FALLBACK needed here? */ + sw_hash = crypto_alloc_shash(al_crypto_hash->sw_hash_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(sw_hash)) { + dev_err(&device->pdev->dev, + "%s: Failed to allocate SW hash '%s' with %ld\n", + __func__, al_crypto_hash->sw_hash_name, + PTR_ERR(sw_hash)); + return PTR_ERR(sw_hash); + } + } + ctx->sw_hash = sw_hash; + + ctx->chan = device->channels[chan_idx]; + + ctx->sa.auth_type = al_crypto_hash->auth_type; + ctx->sa.sha2_mode = al_crypto_hash->sha2_mode; + ctx->sa.sa_op = al_crypto_hash->sa_op; + ctx->sa.signature_size = to_signature_size(crypto_ahash_digestsize(ahash)); + + ctx->sa.auth_hmac_en = false; + ctx->cache_state.cached = false; + ctx->hw_sa = dma_alloc_coherent(&device->pdev->dev, + sizeof(struct al_crypto_hw_sa), + &ctx->hw_sa_dma_addr, + GFP_KERNEL); + + al_crypto_hw_sa_init(&ctx->sa, ctx->hw_sa); + + crypto_ahash_set_reqsize(ahash, sizeof(struct al_crypto_hash_req_ctx)); + + if (ctx->sw_hash) { + ctx->hmac_pads = kmalloc(2 * crypto_shash_descsize(ctx->sw_hash), GFP_KERNEL); + if (!ctx->hmac_pads) + return -ENOMEM; + } + + AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock); + AL_CRYPTO_STATS_INC(ctx->chan->stats_gen.ahash_tfms, 1); + AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock); + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +static void al_crypto_hash_cra_exit(struct crypto_tfm *tfm) +{ + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + struct al_crypto_hash *al_crypto_hash = container_of(alg, struct al_crypto_hash, ahash_alg); + struct al_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct al_crypto_device *device = al_crypto_hash->device; + + dev_dbg(&device->pdev->dev, "%s: cra_name=%s\n", + __func__, tfm->__crt_alg->cra_name); + + /* LRU list access has to be protected */ + spin_lock_bh(&ctx->chan->prep_lock); + if (ctx->cache_state.cached) + al_crypto_cache_remove_lru(ctx->chan, &ctx->cache_state); + spin_unlock_bh(&ctx->chan->prep_lock); + + if (ctx->hw_sa_dma_addr) + dma_free_coherent(&device->pdev->dev, + sizeof(struct al_crypto_hw_sa), + ctx->hw_sa, + ctx->hw_sa_dma_addr); + + if (ctx->sw_hash) + crypto_free_shash(ctx->sw_hash); + + if (ctx->hmac_pads) + kfree(ctx->hmac_pads); + + AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock); + AL_CRYPTO_STATS_DEC(ctx->chan->stats_gen.ahash_tfms, 1); + AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock); + + return; +} + +/****************************************************************************** + *****************************************************************************/ +/* DMA unmap buffers for ahash request + */ +static inline void al_crypto_dma_unmap_ahash(struct al_crypto_chan *chan, + struct al_crypto_sw_desc *desc, + unsigned int digestsize) +{ + struct ahash_request *req = (struct ahash_request *)desc->req; + struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req); + + if (req_ctx->buf_dma_len) + dma_unmap_single(to_dev(chan), + req_ctx->buf_dma_addr, + req_ctx->buf_dma_len, + DMA_TO_DEVICE); + if (desc->src_nents) + dma_unmap_sg(to_dev(chan), + req->src, + desc->src_nents, + DMA_TO_DEVICE); + + if 
(req_ctx->last) { + dma_unmap_single(to_dev(chan), + req_ctx->interm_dma_addr, + AL_CRYPTO_HASH_MAX_DIGEST_SIZE, + DMA_BIDIRECTIONAL); + memcpy(req->result, req_ctx->interm, digestsize); + } +} + +/****************************************************************************** + *****************************************************************************/ +static inline void zero_message_result_copy(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash); + unsigned int digestsize = crypto_ahash_digestsize(ahash); + struct al_crypto_chan *chan = ctx->chan; + u8* zero_message = NULL; + + dev_dbg(to_dev(chan), "%s: auth_type=%d\n", + __func__, ctx->sa.auth_type); + + switch (ctx->sa.auth_type) { + case (AL_CRYPT_AUTH_MD5): + zero_message = zero_message_hash_md5; + break; + case (AL_CRYPT_AUTH_SHA1): + zero_message = zero_message_hash_sha1; + break; + case (AL_CRYPT_AUTH_SHA2): + if (ctx->sa.sha2_mode == AL_CRYPT_SHA2_256) + zero_message = zero_message_hash_sha256; + if (ctx->sa.sha2_mode == AL_CRYPT_SHA2_384) + zero_message = zero_message_hash_sha384; + if (ctx->sa.sha2_mode == AL_CRYPT_SHA2_512) + zero_message = zero_message_hash_sha512; + break; + default: + dev_err(to_dev(chan),"ERROR, unsupported zero message\n"); + return; + } + + memcpy(req->result, zero_message, digestsize); +} +/****************************************************************************** + *****************************************************************************/ +/* Cleanup single ahash request - invoked from cleanup tasklet (interrupt + * handler) + */ +void al_crypto_cleanup_single_ahash(struct al_crypto_chan *chan, + struct al_crypto_sw_desc *desc, + uint32_t comp_status) +{ + struct ahash_request *req = (struct ahash_request *)desc->req; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + unsigned int digestsize = crypto_ahash_digestsize(ahash); + + dev_dbg(to_dev(chan), "%s: comp_status=%x\n", + __func__, comp_status); + + al_crypto_dma_unmap_ahash(chan, desc, digestsize); + + req->base.complete(&req->base, 0); +} + +/****************************************************************************** + *****************************************************************************/ +static int ahash_init(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash); + struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req); + struct al_crypto_chan *chan = ctx->chan; + + dev_dbg(to_dev(chan), "%s: cra_name=%s nbytes=%d\n", + __func__, crypto_ahash_tfm(ahash)->__crt_alg->cra_name, req->nbytes); + + req_ctx->first = true; + req_ctx->last = false; + + req_ctx->current_buf = 0; + req_ctx->buflen_0 = 0; + req_ctx->buflen_1 = 0; + req_ctx->buf_dma_addr = 0; + req_ctx->interm_dma_addr = dma_map_single(to_dev(chan), + req_ctx->interm, + AL_CRYPTO_HASH_MAX_DIGEST_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(to_dev(chan), req_ctx->interm_dma_addr)) { + dev_err(to_dev(chan), "dma_map_single failed!\n"); + return -ENOMEM; + } + req_ctx->hashed_len = 0; + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +static inline void ahash_req_prepare_xaction_buffers(struct ahash_request *req, + struct al_crypto_sw_desc *desc, + int to_hash, int src_nents, + int *buflen, int *src_idx) +{ + struct crypto_ahash *ahash = 
crypto_ahash_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash); + struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req); + struct al_crypto_transaction *xaction = &desc->hal_xaction; + + dev_dbg(to_dev(ctx->chan), "%s: to_hash=%d buflen=%d src_nents=%d\n", + __func__, to_hash, *buflen, src_nents); + + *src_idx = 0; + + if (*buflen) { + desc->src_bufs[*src_idx].addr = req_ctx->buf_dma_addr; + desc->src_bufs[*src_idx].len = *buflen; + xaction->auth_in_len += desc->src_bufs[*src_idx].len; + (*src_idx)++; + } + + if (src_nents) + sg_map_to_xaction_buffers(req->src, desc->src_bufs, + to_hash - *buflen, src_idx); +} + +/****************************************************************************** + *****************************************************************************/ +static inline void ahash_update_stats(struct al_crypto_transaction *xaction, + struct al_crypto_chan *chan) +{ + AL_CRYPTO_STATS_INC(chan->stats_prep.ahash_reqs, 1); + AL_CRYPTO_STATS_INC(chan->stats_prep.ahash_bytes, xaction->auth_in_len); + + if (xaction->auth_in_len <= 512) + AL_CRYPTO_STATS_INC(chan->stats_prep.ahash_reqs_le512, 1); + else if ((xaction->auth_in_len > 512) && (xaction->auth_in_len <= 2048)) + AL_CRYPTO_STATS_INC(chan->stats_prep.ahash_reqs_512_2048, 1); + else if ((xaction->auth_in_len > 2048) && (xaction->auth_in_len <= 4096)) + AL_CRYPTO_STATS_INC(chan->stats_prep.ahash_reqs_2048_4096, 1); + else + AL_CRYPTO_STATS_INC(chan->stats_prep.ahash_reqs_gt4096, 1); +} + +/****************************************************************************** + *****************************************************************************/ +static inline void ahash_req_prepare_xaction(struct ahash_request *req, + struct al_crypto_sw_desc *desc, + int to_hash, int src_nents, + uint8_t *buf, int *buflen) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash); + struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req); + struct al_crypto_chan *chan = ctx->chan; + struct al_crypto_transaction *xaction; + unsigned int digestsize = crypto_ahash_digestsize(ahash); + unsigned int ivsize; + int src_idx; + + dev_dbg(to_dev(chan), "%s: to_hash=%d src_ents=%d buflen=%d digestsize=%d first=%d last=%d\n", + __func__, to_hash, src_nents, *buflen, digestsize, req_ctx->first, req_ctx->last); + + /* In SHA384 the ivsize is 64 bytes and not 48 bytes. */ + ivsize = (digestsize == SHA384_DIGEST_SIZE) ? 
+ SHA512_DIGEST_SIZE : digestsize; + + /* prepare hal transaction */ + xaction = &desc->hal_xaction; + memset(xaction, 0, sizeof(struct al_crypto_transaction)); + xaction->auth_sign_in.len = 0; + xaction->auth_fl_valid = AL_TRUE; + xaction->auth_in_off = 0; + /* if first, there's no input intermediate */ + if (unlikely(req_ctx->first)) { + req_ctx->first = false; + xaction->auth_first = AL_TRUE; + xaction->auth_iv_in.len = 0; + xaction->auth_iv_in.addr = (al_phys_addr_t)(uintptr_t)NULL; + } else { + xaction->auth_first = AL_FALSE; + xaction->auth_iv_in.addr = xaction->auth_iv_out.addr = req_ctx->interm_dma_addr; + xaction->auth_iv_in.len = xaction->auth_iv_out.len = ivsize; + } + + if (unlikely(req_ctx->last)) { + xaction->auth_last = AL_TRUE; + xaction->auth_sign_out.addr = req_ctx->interm_dma_addr; + xaction->auth_sign_out.len = digestsize; + xaction->auth_iv_out.len = 0; + xaction->auth_iv_out.addr = (al_phys_addr_t)(uintptr_t)NULL; + xaction->auth_bcnt = req_ctx->hashed_len; + + /* count the first hmac key^ipad block */ + if (ctx->sa.auth_hmac_en) + xaction->auth_bcnt += crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); + } else { + xaction->auth_last = AL_FALSE; + xaction->auth_iv_out.addr = req_ctx->interm_dma_addr; + xaction->auth_iv_out.len = ivsize; + xaction->auth_sign_out.len = 0; + xaction->auth_bcnt = 0; + } + + xaction->dir = AL_CRYPT_ENCRYPT; + xaction->auth_in_len = 0; + + ahash_req_prepare_xaction_buffers(req, desc, to_hash, src_nents, buflen, &src_idx); + + BUG_ON(src_idx > AL_SSM_MAX_SRC_DESCS); + + xaction->src_size = xaction->auth_in_len; + xaction->src.bufs = &desc->src_bufs[0]; + xaction->src.num = src_idx; + + dev_dbg(to_dev(chan), "%s: ctx->cache_state.cached=%d\n", + __func__, ctx->cache_state.cached); + + if (!ctx->cache_state.cached) { + xaction->sa_indx = al_crypto_cache_replace_lru(chan, &ctx->cache_state, NULL); + xaction->sa_in.addr = ctx->hw_sa_dma_addr; + xaction->sa_in.len = sizeof(struct al_crypto_hw_sa); + } else { + al_crypto_cache_update_lru(chan, &ctx->cache_state); + xaction->sa_indx = ctx->cache_state.idx; + xaction->sa_in.len = 0; + } + + dev_dbg(to_dev(chan), "%s: sa_op=%d, auth_type=%d, sha2_mode=%d\n", + __func__, ctx->sa.sa_op, ctx->sa.auth_type, ctx->sa.sha2_mode); + + xaction->flags = AL_SSM_INTERRUPT; + + ahash_update_stats(xaction, chan); +} + +/****************************************************************************** + *****************************************************************************/ +/* Main hash processing function that handles update/final/finup and digest + * + * Implementation is based on the assumption that the caller waits for + * completion of every operation before issuing the next operation + */ +static int ahash_process_req(struct ahash_request *req, unsigned int nbytes) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash); + struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req); + unsigned int digestsize = crypto_ahash_digestsize(ahash); + struct al_crypto_chan *chan = ctx->chan; + uint8_t *buf = req_ctx->current_buf ? req_ctx->buf_1 : req_ctx->buf_0; + int *buflen = req_ctx->current_buf ? &req_ctx->buflen_1 : &req_ctx->buflen_0; + uint8_t *next_buf = req_ctx->current_buf ? req_ctx->buf_0 : req_ctx->buf_1; + int *next_buflen = req_ctx->current_buf ? 
&req_ctx->buflen_0 : &req_ctx->buflen_1; + int in_len = *buflen + nbytes; + int to_hash, idx; + int src_nents = 0; + struct al_crypto_sw_desc *desc; + int src_sg_nents = sg_nents(req->src); + int rc = 0; + + dev_dbg(to_dev(chan), + "%s: nbytes=%d first=%d last=%d inlen=%d buflen=%d next_buflen=%d src_sg_nents=%d auth_hmac_en=%d\n", + __func__, nbytes, req_ctx->first, req_ctx->last, in_len, *buflen, + *next_buflen, src_sg_nents, ctx->sa.auth_hmac_en); + + if (!req_ctx->last) { + /* if aligned, do not hash last block */ + *next_buflen = (in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1)) ? + : crypto_tfm_alg_blocksize(&ahash->base); + to_hash = in_len - *next_buflen; + + /* Ignore not last empty update requests */ + if (unlikely(in_len == 0)) + return rc; + } else { + if (unlikely(in_len == 0)) { + if (!ctx->sa.auth_hmac_en) + zero_message_result_copy(req); + else { + uint8_t *ipad = ctx->hmac_pads; + uint8_t *opad = ctx->hmac_pads + crypto_shash_descsize(ctx->sw_hash); + + struct shash_desc *desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(ctx->sw_hash), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + dev_dbg(to_dev(chan), "%s:%d: digestsize=%d\n", + __func__, __LINE__, crypto_shash_digestsize(ctx->sw_hash)); + + desc->tfm = ctx->sw_hash; + crypto_shash_set_flags(ctx->sw_hash, + crypto_shash_get_flags(ctx->sw_hash) & CRYPTO_TFM_REQ_MAY_SLEEP); + rc = crypto_shash_init(desc) ? : + crypto_shash_import(desc, ipad) ? : + crypto_shash_final(desc, req->result) ? : + crypto_shash_import(desc, opad) ? : + crypto_shash_finup(desc, req->result, + crypto_shash_digestsize(ctx->sw_hash), + req->result); + kfree(desc); + } + return rc; + } + + *next_buflen = 0; + to_hash = in_len; + } + + dev_dbg(to_dev(chan), "%s: to_hash=%d buflen=%d next_buflen=%d\n", + __func__, to_hash, *buflen, *next_buflen); + + if (!to_hash) { + sg_copy_to_buffer(req->src, src_sg_nents, buf + *buflen, nbytes); + *buflen = *next_buflen; + return rc; + } + + if (*buflen) { + req_ctx->buf_dma_addr = dma_map_single(to_dev(chan), buf, + *buflen, DMA_TO_DEVICE); + if (dma_mapping_error(to_dev(chan), req_ctx->buf_dma_addr)) + return -ENOMEM; + req_ctx->buf_dma_len = *buflen; + } else + req_ctx->buf_dma_len = 0; + + spin_lock_bh(&chan->prep_lock); + if (likely(al_crypto_get_sw_desc(chan, 1) == 0)) + idx = chan->head; + else { + spin_unlock_bh(&chan->prep_lock); + if (req_ctx->buf_dma_len) + dma_unmap_single(to_dev(chan), + req_ctx->buf_dma_addr, + req_ctx->buf_dma_len, + DMA_TO_DEVICE); + return -ENOSPC; + } + + if (*next_buflen) { + sg_pcopy_to_buffer(req->src, src_sg_nents, next_buf, + *next_buflen, nbytes - *next_buflen); + req_ctx->current_buf = !req_ctx->current_buf; + } + + if (nbytes) { + BUG_ON(nbytes < (*next_buflen)); + + src_nents = sg_nents_for_len(req->src, nbytes - (*next_buflen)); + + dev_dbg(to_dev(chan), "%s: src_nents=%d\n", __func__, src_nents); + + dma_map_sg(to_dev(chan), req->src, src_nents, DMA_TO_DEVICE); + } + + chan->sw_desc_num_locked = 1; + chan->tx_desc_produced = 0; + + desc = al_crypto_get_ring_ent(chan, idx); + desc->req = (void *)req; + desc->req_type = AL_CRYPTO_REQ_AHASH; + desc->src_nents = src_nents; + + ahash_req_prepare_xaction(req, desc, to_hash, src_nents, buf, buflen); + + /* send crypto transaction to engine */ + rc = al_crypto_dma_prepare(chan->hal_crypto, chan->idx, &desc->hal_xaction); + if (unlikely(rc != 0)) { + dev_err(to_dev(chan), "%s: al_crypto_dma_prepare failed with %d\n", + __func__, rc); + + al_crypto_dma_unmap_ahash(chan, desc, digestsize); + + 
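+		/*
+		 * NOTE (assumption): the reserved SW descriptor was never
+		 * submitted, so chan->head is unchanged and the slot is
+		 * simply reused by the next request.
+		 */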
spin_unlock_bh(&chan->prep_lock); + return rc; + } + + chan->tx_desc_produced += desc->hal_xaction.tx_descs_count; + + al_crypto_tx_submit(chan); + + req_ctx->hashed_len += to_hash; + + spin_unlock_bh(&chan->prep_lock); + + return -EINPROGRESS; +} + +/****************************************************************************** + *****************************************************************************/ +static int ahash_update(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash); + struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req); + + dev_dbg(to_dev(ctx->chan), "%s:%d\n", __func__, __LINE__); + + req_ctx->last = false; + + return ahash_process_req(req, req->nbytes); +} + +/****************************************************************************** + *****************************************************************************/ +static int ahash_final(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash); + struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req); + + dev_dbg(to_dev(ctx->chan), "%s:%d\n", __func__, __LINE__); + + req_ctx->last = true; + + return ahash_process_req(req, 0); +} + +/****************************************************************************** + *****************************************************************************/ +static int ahash_finup(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash); + struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req); + + dev_dbg(to_dev(ctx->chan), "%s:%d\n", __func__, __LINE__); + + req_ctx->last = true; + + return ahash_process_req(req, req->nbytes); +} + +/****************************************************************************** + *****************************************************************************/ +static int ahash_digest(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash); + struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req); + + dev_dbg(to_dev(ctx->chan), "%s:%d\n", __func__, __LINE__); + + ahash->init(req); + req_ctx->last = true; + + return ahash_process_req(req, req->nbytes); +} + +/****************************************************************************** + *****************************************************************************/ +static int ahash_export(struct ahash_request *req, void *out) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash); + struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req); + + dev_dbg(to_dev(ctx->chan), "%s:%d\n", __func__, __LINE__); + + memcpy(out, req_ctx, sizeof(*req_ctx)); + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +static int ahash_import(struct ahash_request *req, const void *in) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash); + struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req); + + dev_dbg(to_dev(ctx->chan), "%s:%d\n", __func__, __LINE__); + + memcpy(req_ctx, in, sizeof(*req_ctx)); + + return 0; +} + 
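+/* HMAC precomputation overview (see hmac_setkey() below): with K' being the
+ * key zero-padded -- or first digested, if longer than a block -- to the
+ * block size, the driver computes and stores the partial states
+ *
+ *	ipad_state = H-compress(K' ^ ipad)
+ *	opad_state = H-compress(K' ^ opad)
+ *
+ * (the standard HMAC 0x36/0x5c pads) in the SA, so the engine resumes from
+ * them and the two pad blocks are hashed once per setkey() rather than once
+ * per request.
+ */
+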
+/****************************************************************************** + *****************************************************************************/ +/* Generate intermediate hash of hmac^opad and hmac^ipad using sw hash engine + * and place the results in ctx->sa. + */ +int hmac_setkey(struct al_crypto_ctx *ctx, const u8 *key, + unsigned int keylen, unsigned int sw_hash_interm_offset, + unsigned int sw_hash_interm_size) +{ + unsigned int blocksize, digestsize, descsize; + int i, rc; + + dev_dbg(to_dev(ctx->chan), "%s: keylen=%d sw_hash_interm_offset=%d sw_hash_interm_size=%d\n", + __func__, keylen, sw_hash_interm_offset, sw_hash_interm_size); + + /* Based on code from the hmac module */ + blocksize = crypto_shash_blocksize(ctx->sw_hash); + digestsize = crypto_shash_digestsize(ctx->sw_hash); + descsize = crypto_shash_descsize(ctx->sw_hash); + + dev_dbg(to_dev(ctx->chan), "%s: blocksize=%d digestsize=%d descsize=%d\n", + __func__, blocksize, digestsize, descsize); + + { + uint8_t *ipad = ctx->hmac_pads; + uint8_t *opad = ctx->hmac_pads + descsize; + + struct shash_desc *desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(ctx->sw_hash), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + desc->tfm = ctx->sw_hash; + crypto_shash_set_flags(ctx->sw_hash, crypto_shash_get_flags(ctx->sw_hash) & + CRYPTO_TFM_REQ_MAY_SLEEP); + + /* hash the key if longer than blocksize */ + if (keylen > blocksize) { + int err; + + err = crypto_shash_digest(desc, key, keylen, ipad); + if (err) { + kfree(desc); + return err; + } + + keylen = digestsize; + } else + memcpy(ipad, key, keylen); + + memset(ipad + keylen, 0, blocksize - keylen); + memcpy(opad, ipad, blocksize); + + /* Generate XORs with ipad and opad */ + for (i = 0; i < blocksize; i++) { + ipad[i] ^= AL_CRYPTO_HASH_HMAC_IPAD; + opad[i] ^= AL_CRYPTO_HASH_HMAC_OPAD; + } + + /* Generate intermediate results using SW hash */ + rc = crypto_shash_init(desc) ? : + crypto_shash_update(desc, ipad, blocksize) ? : + crypto_shash_export(desc, ipad) ? : + crypto_shash_init(desc) ? : + crypto_shash_update(desc, opad, blocksize) ? 
: + crypto_shash_export(desc, opad); + + if (rc == 0) { + unsigned int offset = sw_hash_interm_offset; + unsigned int size = sw_hash_interm_size; + + /* Copy intermediate results to SA */ + memcpy(ctx->sa.hmac_iv_in, ipad + offset, size); + memcpy(ctx->sa.hmac_iv_out, opad + offset, size); + + print_hex_dump_debug(KBUILD_MODNAME ": hmac_iv_in: ", DUMP_PREFIX_OFFSET, + 16, 1, ctx->sa.hmac_iv_in, size, false); + print_hex_dump_debug(KBUILD_MODNAME ": hmac_iv_out: ", DUMP_PREFIX_OFFSET, + 16, 1, ctx->sa.hmac_iv_out, size, false); + + ctx->sa.auth_hmac_en = true; + } + + kfree(desc); + } + return rc; +} + +/****************************************************************************** + *****************************************************************************/ +static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, + unsigned int keylen) +{ + struct ahash_alg *alg = __crypto_ahash_alg(crypto_ahash_tfm(ahash)->__crt_alg); + struct al_crypto_hash *al_crypto_hash = container_of(alg, struct al_crypto_hash, ahash_alg); + struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash); + int rc; + + dev_dbg(to_dev(ctx->chan), "%s: keylen=%d sw_hash=%d\n", + __func__, keylen, !!ctx->sw_hash); + + if (!ctx->sw_hash) + return 0; + + rc = hmac_setkey(ctx, key, keylen, + al_crypto_hash->sw_hash_interm_offset, + al_crypto_hash->sw_hash_interm_size); + if (rc == 0) { + al_crypto_hw_sa_init(&ctx->sa, ctx->hw_sa); + + /* mark the sa as not cached, will update in next xaction */ + spin_lock_bh(&ctx->chan->prep_lock); + if (ctx->cache_state.cached) + al_crypto_cache_remove_lru(ctx->chan, &ctx->cache_state); + spin_unlock_bh(&ctx->chan->prep_lock); + } + + return rc; +} + +/****************************************************************************** + *****************************************************************************/ +static struct al_crypto_hash *al_crypto_hash_alloc(struct al_crypto_device *device, + struct al_crypto_hash_template *template, + bool keyed) +{ + struct al_crypto_hash *t_alg; + struct ahash_alg *halg; + struct crypto_alg *alg; + + t_alg = kzalloc(sizeof(struct al_crypto_hash), GFP_KERNEL); + if (!t_alg) { + dev_err(&device->pdev->dev, "failed to allocate t_alg\n"); + return ERR_PTR(-ENOMEM); + } + + t_alg->ahash_alg = template->template_ahash; + halg = &t_alg->ahash_alg; + alg = &halg->halg.base; + + dev_dbg(&device->pdev->dev, "%s: name=%s sw_hash_name=%s keyed=%d statesize=%d\n", + __func__, alg->cra_name, t_alg->sw_hash_name, keyed, halg->halg.statesize); + + if (keyed) { + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", + template->hmac_name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + template->hmac_driver_name); + snprintf(t_alg->sw_hash_name, CRYPTO_MAX_ALG_NAME, "%s", + template->sw_hash_name); + } else { + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", + template->name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + template->driver_name); + snprintf(t_alg->sw_hash_name, CRYPTO_MAX_ALG_NAME, "%s", + ""); + } + + if (!keyed) + halg->setkey = NULL; + + alg->cra_module = THIS_MODULE; + alg->cra_init = al_crypto_hash_cra_init; + alg->cra_exit = al_crypto_hash_cra_exit; + alg->cra_priority = AL_CRYPTO_CRA_PRIORITY; + alg->cra_blocksize = template->blocksize; + alg->cra_alignmask = 0; + alg->cra_ctxsize = sizeof(struct al_crypto_ctx); + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK; + + t_alg->auth_type = template->auth_type; + t_alg->sha2_mode = template->sha2_mode; + t_alg->sa_op = 
template->sa_op; + t_alg->device = device; + t_alg->sw_hash_interm_offset = template->sw_hash_interm_offset; + t_alg->sw_hash_interm_size = template->sw_hash_interm_size; + + return t_alg; +} + +/****************************************************************************** + *****************************************************************************/ +int al_crypto_hash_init(struct al_crypto_device *device) +{ + int i; + int err = 0; + + INIT_LIST_HEAD(&device->hash_list); + + /* tfm count is initialized in alg, move to core?? */ + /* atomic_set(&device->tfm_count, -1); */ + + /* register crypto algorithms the device supports */ + for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { + struct al_crypto_hash *t_alg; + + /* register hmac version */ + t_alg = al_crypto_hash_alloc(device, &driver_hash[i], true); + if (IS_ERR(t_alg)) { + err = PTR_ERR(t_alg); + dev_warn(&device->pdev->dev, + "%s alg allocation failed with %d\n", + driver_hash[i].driver_name, + err); + continue; + } + + err = crypto_register_ahash(&t_alg->ahash_alg); + if (err) { + dev_warn(&device->pdev->dev, + "%s alg registration failed with %d\n", + t_alg->ahash_alg.halg.base.cra_driver_name, + err); + kfree(t_alg); + } else + list_add_tail(&t_alg->entry, &device->hash_list); + + /* register unkeyed version */ + t_alg = al_crypto_hash_alloc(device, &driver_hash[i], false); + if (IS_ERR(t_alg)) { + err = PTR_ERR(t_alg); + dev_warn(&device->pdev->dev, + "%s alg allocation failed with err %d\n", + driver_hash[i].driver_name, err); + continue; + } + + err = crypto_register_ahash(&t_alg->ahash_alg); + if (err) { + dev_warn(&device->pdev->dev, + "%s alg registration failed with %d\n", + t_alg->ahash_alg.halg.base.cra_driver_name, + err); + kfree(t_alg); + } else + list_add_tail(&t_alg->entry, &device->hash_list); + } + + if (!list_empty(&device->hash_list)) + dev_info(&device->pdev->dev, + "hash algorithms registered in /proc/crypto\n"); + + return err; +} + +/****************************************************************************** + *****************************************************************************/ +void al_crypto_hash_terminate(struct al_crypto_device *device) +{ + struct al_crypto_hash *t_alg, *n; + + if (!device->hash_list.next) + return; + + list_for_each_entry_safe(t_alg, n, &device->hash_list, entry) { + crypto_unregister_ahash(&t_alg->ahash_alg); + list_del(&t_alg->entry); + kfree(t_alg); + } +} diff --git a/target/linux/alpine/files/drivers/crypto/al/al_crypto_main.c b/target/linux/alpine/files/drivers/crypto/al/al_crypto_main.c new file mode 100644 index 00000000000000..1c92e6ad25d4d4 --- /dev/null +++ b/target/linux/alpine/files/drivers/crypto/al/al_crypto_main.c @@ -0,0 +1,320 @@ +/* + * drivers/crypto/al_crypto_main.c + * + * Annapurna Labs Crypto driver - pci enumeration and init invocation + * + * Copyright (C) 2012 Annapurna Labs Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include +#include +#include +#include + +#include + +#include "al_crypto.h" +#include "al_crypto_module_params.h" + +MODULE_VERSION(AL_CRYPTO_VERSION); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Annapurna Labs"); + +#define DRV_NAME "al_crypto" +#define MAX_HW_DESCS_PER_SW_DECS 4 + +enum { + /* BAR's are enumerated in terms of pci_resource_start() terms */ + AL_CRYPTO_UDMA_BAR = 0, + AL_CRYPTO_APP_BAR = 4 +}; + +static int al_crypto_pci_probe( + struct pci_dev *pdev, + const struct pci_device_id *id); + +static void al_crypto_pci_remove( + struct pci_dev *pdev); + +static void al_crypto_flr(struct pci_dev *pdev); + + +static const struct pci_device_id al_crypto_pci_tbl[] = { + { PCI_VDEVICE(AMAZON_ANNAPURNA_LABS, PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_CRYPTO), }, + { PCI_VDEVICE(AMAZON_ANNAPURNA_LABS, PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_CRYPTO_VF), }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, al_crypto_pci_tbl); + +static struct pci_driver al_crypto_pci_driver = { + .name = DRV_NAME, + .id_table = al_crypto_pci_tbl, + .probe = al_crypto_pci_probe, + .remove = al_crypto_pci_remove, +}; + +/****************************************************************************** + *****************************************************************************/ +static int al_crypto_pci_probe( + struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int status = 0; + int sriov_crc_channels = al_crypto_get_crc_channels(); + void __iomem * const *iomap; + struct device *dev = &pdev->dev; + struct al_crypto_device *device; + int bar_reg; + + dev_dbg(dev, "%s(%p, %p)\n", __func__, pdev, id); + + if (min(al_crypto_get_rx_descs_order(),al_crypto_get_tx_descs_order()) < + (MAX_HW_DESCS_PER_SW_DECS + al_crypto_get_ring_alloc_order())) { + dev_err(dev, "%s: Too small HW Q can lead to unexpected behavior " + "upon queue overflow\n",__func__); + } + + al_crypto_flr(pdev); + + status = pcim_enable_device(pdev); + if (status) { + pr_err("%s: pcim_enable_device failed!\n", __func__); + goto done; + } + + bar_reg = pdev->is_physfn ? 
+		(1 << AL_CRYPTO_UDMA_BAR) | (1 << AL_CRYPTO_APP_BAR) :
+		(1 << AL_CRYPTO_UDMA_BAR);
+
+	status = pcim_iomap_regions(
+		pdev,
+		bar_reg,
+		DRV_NAME);
+	if (status) {
+		pr_err("%s: pcim_iomap_regions failed!\n", __func__);
+		goto done;
+	}
+
+	iomap = pcim_iomap_table(pdev);
+	if (!iomap) {
+		status = -ENOMEM;
+		goto done;
+	}
+
+	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+	if (status)
+		goto done;
+
+	device = devm_kzalloc(dev, sizeof(struct al_crypto_device), GFP_KERNEL);
+	if (!device) {
+		status = -ENOMEM;
+		goto done;
+	}
+
+	device->pdev = pdev;
+
+	pci_set_master(pdev);
+	pci_set_drvdata(pdev, device);
+	dev_set_drvdata(dev, device);
+
+#ifdef CONFIG_BTRFS_AL_FAST_CRC_DMA
+	BUG_ON(!al_crypto_get_use_virtual_function());
+#endif
+	/*
+	 * When VF is used the PF is dedicated to crc and the VF is dedicated
+	 * to crypto
+	 */
+	if (al_crypto_get_use_virtual_function()) {
+		if (pdev->is_physfn && (pci_sriov_get_totalvfs(pdev) > 0))
+			sriov_crc_channels = 0;
+		else if (pdev->is_virtfn) {
+			sriov_crc_channels = al_crypto_get_max_channels();
+#ifdef CONFIG_BTRFS_AL_FAST_CRC_DMA
+			BUG_ON(sriov_crc_channels < NR_CPUS);
+#endif
+		}
+	}
+
+	device->max_channels = al_crypto_get_max_channels();
+	device->crc_channels = sriov_crc_channels;
+
+	if (al_crypto_get_use_virtual_function() && pdev->is_physfn &&
+	    (pci_sriov_get_totalvfs(pdev) > 0)) {
+		status = pci_enable_sriov(pdev, 1);
+		if (status) {
+			dev_err(dev, "%s: pci_enable_sriov failed, status %d\n",
+				__func__, status);
+			goto done;
+		}
+	}
+
+	status = al_crypto_core_init(
+		device,
+		iomap[AL_CRYPTO_UDMA_BAR],
+		pdev->is_physfn ? iomap[AL_CRYPTO_APP_BAR] : NULL);
+	if (status) {
+		dev_err(dev, "%s: al_crypto_core_init failed\n", __func__);
+		goto done;
+	}
+
+	status = al_crypto_sysfs_init(device);
+	if (status) {
+		dev_err(dev, "%s: al_crypto_sysfs_init failed\n", __func__);
+		goto err_sysfs_init;
+	}
+
+	if (device->crc_channels < device->max_channels) {
+		status = al_crypto_skcipher_init(device);
+		if (status) {
+			dev_err(dev, "%s: al_crypto_skcipher_init failed\n",
+				__func__);
+			goto err_skcipher_init;
+		}
+
+		status = al_crypto_aead_init(device);
+		if (status) {
+			dev_err(dev, "%s: al_crypto_aead_init failed\n",
+				__func__);
+			goto err_aead_init;
+		}
+
+		status = al_crypto_hash_init(device);
+		if (status) {
+			dev_err(dev, "%s: al_crypto_hash_init failed\n",
+				__func__);
+			goto err_hash_init;
+		}
+	} else
+		dev_info(dev, "%s: Skipping skcipher/aead/hash initialization, "
+			 "no allocated channels\n", __func__);
+
+	if (device->crc_channels > 0) {
+		status = al_crypto_crc_init(device);
+		if (status) {
+			dev_err(dev, "%s: al_crypto_crc_init failed\n",
+				__func__);
+			goto err_crc_init;
+		}
+	} else
+		dev_info(dev, "%s: Skipping crc initialization, "
+			 "no allocated channels\n", __func__);
+
+	goto done;
+
+err_crc_init:
+	al_crypto_hash_terminate(device);
+err_hash_init:
+	al_crypto_aead_terminate(device);
+err_aead_init:
+	al_crypto_skcipher_terminate(device);
+err_skcipher_init:
+	al_crypto_sysfs_terminate(device);
+err_sysfs_init:
+	al_crypto_core_terminate(device);
+done:
+	return status;
+}
+
+int al_crypto_read_pcie_config(void *handle, int where, uint32_t *val)
+{
+	/* handle is a pointer to the pci_dev */
+	pci_read_config_dword((struct pci_dev *)handle, where, val);
+	return 0;
+}
+
+int al_crypto_write_pcie_config(void *handle, int where, uint32_t val)
+{
+	/* handle is a pointer to the pci_dev */
+	pci_write_config_dword((struct pci_dev *)handle, where, val);
+	return 0;
+}
+
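+/* These config-space accessors, together with al_crypto_write_pcie_flr()
+ * below, are the callbacks handed to the HAL's al_pcie_perform_flr() (see
+ * al_crypto_flr()); the FLR callback waits 1 ms for the function to settle
+ * after the reset.
+ */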
+int al_crypto_write_pcie_flr(void *handle)
+{
+	/* handle is a pointer to the pci_dev */
+	__pci_reset_function_locked((struct pci_dev *)handle);
+	udelay(1000);
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static inline void al_crypto_flr(struct pci_dev *pdev)
+{
+	al_pcie_perform_flr(al_crypto_read_pcie_config,
+			    al_crypto_write_pcie_config,
+			    al_crypto_write_pcie_flr,
+			    pdev);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static void al_crypto_pci_remove(struct pci_dev *pdev)
+{
+	struct al_crypto_device *device = pci_get_drvdata(pdev);
+
+	if (!device)
+		return;
+
+	dev_dbg(&pdev->dev, "Removing crypto device\n");
+
+	if (device->pdev->is_physfn)
+		pci_disable_sriov(device->pdev);
+
+	al_crypto_crc_terminate(device);
+
+	al_crypto_hash_terminate(device);
+
+	al_crypto_sysfs_terminate(device);
+
+	al_crypto_aead_terminate(device);
+
+	al_crypto_skcipher_terminate(device);
+
+	al_crypto_core_terminate(device);
+
+	pci_disable_device(pdev);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static int __init al_crypto_init_module(void)
+{
+	int err;
+
+	pr_info("%s: Annapurna Labs Crypto Driver %s\n",
+		DRV_NAME, AL_CRYPTO_VERSION);
+
+	err = pci_register_driver(&al_crypto_pci_driver);
+
+	return err;
+}
+module_init(al_crypto_init_module);
+
+/******************************************************************************
+ *****************************************************************************/
+static void __exit al_crypto_exit_module(void)
+{
+	pci_unregister_driver(&al_crypto_pci_driver);
+}
+module_exit(al_crypto_exit_module);
diff --git a/target/linux/alpine/files/drivers/crypto/al/al_crypto_module_params.c b/target/linux/alpine/files/drivers/crypto/al/al_crypto_module_params.c
new file mode 100644
index 00000000000000..046e75729016ad
--- /dev/null
+++ b/target/linux/alpine/files/drivers/crypto/al/al_crypto_module_params.c
@@ -0,0 +1,105 @@
+/*
+ * drivers/crypto/al/al_crypto_module_params.c
+ *
+ * Annapurna Labs Crypto driver - module params
+ *
+ * Copyright (C) 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include
+
+#include "al_crypto.h"
+#include "al_crypto_module_params.h"
+
+static bool use_virtual_function = true;
+module_param(use_virtual_function, bool, 0644);
+MODULE_PARM_DESC(
+	use_virtual_function,
+	"use the SR-IOV capability of the crypto engine (default: true). "
+	"When the VF is used there are 4 crc channels and 4 crypto channels");
+
+static int crc_channels = 1;
+module_param(crc_channels, int, 0644);
+MODULE_PARM_DESC(
+	crc_channels,
+	"number of crc channels (queues) to enable (default: 1)");
+
+static int max_channels = AL_CRYPTO_DMA_MAX_CHANNELS;
+module_param(max_channels, int, 0644);
+MODULE_PARM_DESC(
+	max_channels,
+	"maximum number of channels (queues) to enable (default: 4)");
+
+static int ring_alloc_order = 10;
+module_param(ring_alloc_order, int, 0644);
+MODULE_PARM_DESC(
+	ring_alloc_order,
+	"allocate 2^n descriptors per channel"
+	" (default: 10, max: 16)");
+
+static int tx_descs_order = 14;
+module_param(tx_descs_order, int, 0644);
+MODULE_PARM_DESC(
+	tx_descs_order,
+	"allocate 2^n descriptors in the Tx queue (default: 14)");
+
+static int rx_descs_order = 14;
+module_param(rx_descs_order, int, 0644);
+MODULE_PARM_DESC(
+	rx_descs_order,
+	"allocate 2^n descriptors in the Rx queue (default: 14)");
+
+static bool use_single_msix;
+module_param(use_single_msix, bool, 0644);
+MODULE_PARM_DESC(
+	use_single_msix,
+	"Use a single MSI-X vector per group instead of one per queue");
+
+bool al_crypto_get_use_virtual_function(void)
+{
+	return use_virtual_function;
+}
+
+bool al_crypto_get_use_single_msix(void)
+{
+	return use_single_msix;
+}
+
+int al_crypto_get_crc_channels(void)
+{
+	return crc_channels;
+}
+
+int al_crypto_get_max_channels(void)
+{
+	return max_channels;
+}
+
+int al_crypto_get_ring_alloc_order(void)
+{
+	return ring_alloc_order;
+}
+
+int al_crypto_get_tx_descs_order(void)
+{
+	return tx_descs_order;
+}
+
+int al_crypto_get_rx_descs_order(void)
+{
+	return rx_descs_order;
+}
diff --git a/target/linux/alpine/files/drivers/crypto/al/al_crypto_module_params.h b/target/linux/alpine/files/drivers/crypto/al/al_crypto_module_params.h
new file mode 100644
index 00000000000000..5879b173836dbf
--- /dev/null
+++ b/target/linux/alpine/files/drivers/crypto/al/al_crypto_module_params.h
@@ -0,0 +1,40 @@
+/*
+ * drivers/crypto/al/al_crypto_module_params.h
+ *
+ * Annapurna Labs Crypto driver - module params
+ *
+ * Copyright (C) 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __AL_CRYPTO_MODULE_PARAMS_H__ +#define __AL_CRYPTO_MODULE_PARAMS_H__ + +bool al_crypto_get_use_virtual_function(void); + +int al_crypto_get_crc_channels(void); + +int al_crypto_get_max_channels(void); + +int al_crypto_get_ring_alloc_order(void); + +int al_crypto_get_tx_descs_order(void); + +int al_crypto_get_rx_descs_order(void); + +bool al_crypto_get_use_single_msix(void); + +#endif /* __AL_CRYPTO_MODULE_PARAMS_H__ */ diff --git a/target/linux/alpine/files/drivers/crypto/al/al_crypto_skcipher.c b/target/linux/alpine/files/drivers/crypto/al/al_crypto_skcipher.c new file mode 100644 index 00000000000000..a3ef84a4bd0b16 --- /dev/null +++ b/target/linux/alpine/files/drivers/crypto/al/al_crypto_skcipher.c @@ -0,0 +1,961 @@ +/* + * drivers/crypto/al_crypto_skcipher.c + * + * Annapurna Labs Crypto driver - ablckcipher/aead algorithms + * + * Copyright (C) 2012 Annapurna Labs Ltd. + * + * Algorithm registration code and chained scatter/gather lists + * handling based on caam driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "linux/export.h" +#include "linux/crypto.h" +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "al_crypto.h" + +#define AL_CRYPTO_CRA_PRIORITY 300 + +static int al_crypto_init_tfm(struct crypto_skcipher *tfm); + +static void al_crypto_exit_tfm(struct crypto_skcipher *tfm); + +static int al_crypto_setkey_des(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keylen); + +static int al_crypto_setkey_aes(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keylen); + +static int al_crypto_do_crypt(struct skcipher_request *req, bool lock); + +static int al_crypto_encrypt(struct skcipher_request *req); + +static int al_crypto_decrypt(struct skcipher_request *req); + +struct al_crypto_skcipher_req_ctx { + enum al_crypto_dir dir; + u8 *backup_iv; + unsigned int remaining; + unsigned int cryptlen; + struct scatterlist* src; + struct scatterlist* dst; +}; + +struct al_crypto_alg { + struct list_head entry; + struct al_crypto_device *device; + enum al_crypto_sa_enc_type enc_type; + enum al_crypto_sa_op sa_op; + struct skcipher_alg alg; +}; + +struct al_crypto_skcipher_template { + char name[CRYPTO_MAX_ALG_NAME]; + char driver_name[CRYPTO_MAX_ALG_NAME]; + unsigned int blocksize; + enum al_crypto_sa_enc_type enc_type; + enum al_crypto_sa_op sa_op; + struct skcipher_alg alg; +}; + +static struct al_crypto_skcipher_template driver_algs[] = { + { + .name = "cbc(aes)", + .driver_name = "cbc-aes-al", + .blocksize = AES_BLOCK_SIZE, + .enc_type = AL_CRYPT_AES_CBC, + .sa_op = 
+static struct al_crypto_skcipher_template driver_algs[] = {
+	{
+		.name = "cbc(aes)",
+		.driver_name = "cbc-aes-al",
+		.blocksize = AES_BLOCK_SIZE,
+		.enc_type = AL_CRYPT_AES_CBC,
+		.sa_op = AL_CRYPT_ENC_ONLY,
+		.alg = {
+			.init = al_crypto_init_tfm,
+			.exit = al_crypto_exit_tfm,
+			.setkey = al_crypto_setkey_aes,
+			.encrypt = al_crypto_encrypt,
+			.decrypt = al_crypto_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+	},
+	{
+		.name = "ecb(aes)",
+		.driver_name = "ecb-aes-al",
+		.blocksize = AES_BLOCK_SIZE,
+		.enc_type = AL_CRYPT_AES_ECB,
+		.sa_op = AL_CRYPT_ENC_ONLY,
+		.alg = {
+			.init = al_crypto_init_tfm,
+			.exit = al_crypto_exit_tfm,
+			.setkey = al_crypto_setkey_aes,
+			.encrypt = al_crypto_encrypt,
+			.decrypt = al_crypto_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+	},
+	{
+		.name = "ctr(aes)",
+		.driver_name = "ctr-aes-al",
+		.blocksize = 1,
+		.enc_type = AL_CRYPT_AES_CTR,
+		.sa_op = AL_CRYPT_ENC_ONLY,
+		.alg = {
+			.init = al_crypto_init_tfm,
+			.exit = al_crypto_exit_tfm,
+			.setkey = al_crypto_setkey_aes,
+			.encrypt = al_crypto_encrypt,
+			.decrypt = al_crypto_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+	},
+	{
+		.name = "cbc(des)",
+		.driver_name = "cbc-des-al",
+		.blocksize = DES_BLOCK_SIZE,
+		.enc_type = AL_CRYPT_DES_CBC,
+		.sa_op = AL_CRYPT_ENC_ONLY,
+		.alg = {
+			.init = al_crypto_init_tfm,
+			.exit = al_crypto_exit_tfm,
+			.setkey = al_crypto_setkey_des,
+			.encrypt = al_crypto_encrypt,
+			.decrypt = al_crypto_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+		},
+	},
+	{
+		.name = "ecb(des)",
+		.driver_name = "ecb-des-al",
+		.blocksize = DES_BLOCK_SIZE,
+		.enc_type = AL_CRYPT_DES_ECB,
+		.sa_op = AL_CRYPT_ENC_ONLY,
+		.alg = {
+			.init = al_crypto_init_tfm,
+			.exit = al_crypto_exit_tfm,
+			.setkey = al_crypto_setkey_des,
+			.encrypt = al_crypto_encrypt,
+			.decrypt = al_crypto_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = 0,
+		},
+	},
+	{
+		.name = "ecb(des3_ede)",
+		.driver_name = "ecb-des3-ede-al",
+		.blocksize = DES_BLOCK_SIZE,
+		.enc_type = AL_CRYPT_TRIPDES_ECB,
+		.sa_op = AL_CRYPT_ENC_ONLY,
+		.alg = {
+			.init = al_crypto_init_tfm,
+			.exit = al_crypto_exit_tfm,
+			.setkey = al_crypto_setkey_des,
+			.encrypt = al_crypto_encrypt,
+			.decrypt = al_crypto_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = 0,
+		},
+	},
+	{
+		.name = "cbc(des3_ede)",
+		.driver_name = "cbc-des3-ede-al",
+		.blocksize = DES_BLOCK_SIZE,
+		.enc_type = AL_CRYPT_TRIPDES_CBC,
+		.sa_op = AL_CRYPT_ENC_ONLY,
+		.alg = {
+			.init = al_crypto_init_tfm,
+			.exit = al_crypto_exit_tfm,
+			.setkey = al_crypto_setkey_des,
+			.encrypt = al_crypto_encrypt,
+			.decrypt = al_crypto_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+		},
+	},
+};
+
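+/*
+ * tfm init: transforms are spread round-robin over the channels not
+ * reserved for CRC, and each tfm gets coherent buffers for its
+ * hardware SA and IV so the engine can fetch them by DMA.
+ */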
+/******************************************************************************
+ *****************************************************************************/
+static int al_crypto_init_tfm(struct crypto_skcipher *tfm)
+{
+	struct al_crypto_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct al_crypto_alg *al_crypto_alg = container_of(alg, struct al_crypto_alg, alg);
+	struct al_crypto_device *device = al_crypto_alg->device;
+	int chan_idx = atomic_inc_return(&device->tfm_count) %
+		(device->num_channels - device->crc_channels);
+
+	dev_dbg(&device->pdev->dev, "%s: cra_name=%s\n",
+		__func__, tfm->base.__crt_alg->cra_name);
+
+	memset(ctx, 0, sizeof(struct al_crypto_ctx));
+
+	ctx->chan = device->channels[chan_idx];
+
+	ctx->sa.enc_type = al_crypto_alg->enc_type;
+	ctx->sa.sa_op = al_crypto_alg->sa_op;
+
+	ctx->hw_sa = dma_alloc_coherent(&device->pdev->dev,
+					sizeof(struct al_crypto_hw_sa),
+					&ctx->hw_sa_dma_addr,
+					GFP_KERNEL);
+
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct al_crypto_skcipher_req_ctx));
+
+	ctx->iv = dma_alloc_coherent(&device->pdev->dev,
+				     AL_CRYPTO_MAX_IV_LENGTH,
+				     &ctx->iv_dma_addr,
+				     GFP_KERNEL);
+
+	AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock);
+	AL_CRYPTO_STATS_INC(ctx->chan->stats_gen.skcipher_tfms, 1);
+	AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock);
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static void al_crypto_exit_tfm(struct crypto_skcipher *tfm)
+{
+	struct al_crypto_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct al_crypto_alg *al_crypto_alg = container_of(alg, struct al_crypto_alg, alg);
+	struct al_crypto_device *device = al_crypto_alg->device;
+
+	dev_dbg(&device->pdev->dev, "%s: cra_name=%s\n",
+		__func__, tfm->base.__crt_alg->cra_name);
+
+	/* LRU list access has to be protected */
+	spin_lock_bh(&ctx->chan->prep_lock);
+	if (ctx->cache_state.cached)
+		al_crypto_cache_remove_lru(ctx->chan, &ctx->cache_state);
+	spin_unlock_bh(&ctx->chan->prep_lock);
+
+	if (ctx->hw_sa_dma_addr)
+		dma_free_coherent(&device->pdev->dev,
+				sizeof(struct al_crypto_hw_sa),
+				ctx->hw_sa,
+				ctx->hw_sa_dma_addr);
+
+	if (ctx->iv_dma_addr)
+		dma_free_coherent(&device->pdev->dev,
+				AL_CRYPTO_MAX_IV_LENGTH,
+				ctx->iv,
+				ctx->iv_dma_addr);
+
+	AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock);
+	AL_CRYPTO_STATS_DEC(ctx->chan->stats_gen.skcipher_tfms, 1);
+	AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock);
+}
+
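+/*
+ * The setkey handlers rebuild the hardware SA in host memory and drop
+ * any cached copy; the engine re-fetches the SA on the next
+ * transaction that uses this tfm.
+ */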
+/******************************************************************************
+ *****************************************************************************/
+static int al_crypto_setkey_des(struct crypto_skcipher *tfm,
+		const u8 *key, unsigned int keylen)
+{
+	struct al_crypto_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int ret;
+
+	dev_dbg(to_dev(ctx->chan), "%s: keylen=%d\n", __func__, keylen);
+	print_hex_dump_debug(KBUILD_MODNAME ": key: ",
+			DUMP_PREFIX_OFFSET, 16, 1, key, keylen, false);
+
+	if ((ctx->sa.enc_type == AL_CRYPT_TRIPDES_CBC) ||
+		(ctx->sa.enc_type == AL_CRYPT_TRIPDES_ECB)) {
+		ctx->sa.tripdes_m = AL_CRYPT_TRIPDES_EDE;
+		if (keylen != DES3_EDE_KEY_SIZE)
+			return -EINVAL;
+	} else {
+		ctx->sa.tripdes_m = 0;
+		if (keylen != DES_KEY_SIZE)
+			return -EINVAL;
+
+		/* Check for weak keys: keys that cause the encryption
+		 * mode of DES to act identically to its decryption mode.
+		 */
+		ret = verify_skcipher_des_key(tfm, key);
+		if (ret)
+			return ret;
+	}
+
+	/* TODO: optimize HAL to hold ptrs to save this memcpy */
+	/* copy the key to the sa */
+	memcpy(&ctx->sa.enc_key, key, keylen);
+
+	al_crypto_hw_sa_init(&ctx->sa, ctx->hw_sa);
+
+	/* mark the sa as not cached, will update in next xaction */
+	spin_lock_bh(&ctx->chan->prep_lock);
+	if (ctx->cache_state.cached)
+		al_crypto_cache_remove_lru(ctx->chan, &ctx->cache_state);
+	spin_unlock_bh(&ctx->chan->prep_lock);
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static int al_crypto_setkey_aes(struct crypto_skcipher *tfm,
+		const u8 *key, unsigned int keylen)
+{
+	struct al_crypto_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	dev_dbg(to_dev(ctx->chan), "%s: keylen=%d\n", __func__, keylen);
+	print_hex_dump_debug(KBUILD_MODNAME ": key: ",
+			DUMP_PREFIX_OFFSET, 16, 1, key, keylen, false);
+
+	if (al_crypto_keylen_to_sa_aes_ksize(keylen, &ctx->sa.aes_ksize))
+		return -EINVAL;
+
+	/* For now we don't support GCM or CCM modes */
+	BUG_ON((ctx->sa.enc_type == AL_CRYPT_AES_GCM) ||
+		(ctx->sa.enc_type == AL_CRYPT_AES_CCM));
+
+	/* TODO: optimize HAL to hold ptrs to save this memcpy */
+	/* copy the key to the sa */
+	memcpy(&ctx->sa.enc_key, key, keylen);
+
+	/* Set the counter increment to 128 bits to match the Linux
+	 * implementation. We know this contradicts the NIST spec; if
+	 * and when Linux is aligned with the spec, this should be
+	 * fixed too. Relevant only for CTR, GCM and CCM modes.
+	 */
+	ctx->sa.cntr_size = AL_CRYPT_CNTR_128_BIT;
+
+	al_crypto_hw_sa_init(&ctx->sa, ctx->hw_sa);
+
+	/* mark the sa as not cached, will update in next xaction */
+	spin_lock_bh(&ctx->chan->prep_lock);
+	if (ctx->cache_state.cached)
+		al_crypto_cache_remove_lru(ctx->chan, &ctx->cache_state);
+	spin_unlock_bh(&ctx->chan->prep_lock);
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* DMA unmap buffers for skcipher request
+ */
+static inline void al_crypto_dma_unmap(struct al_crypto_chan *chan,
+		struct skcipher_request *req,
+		int src_nents, int dst_nents,
+		struct al_crypto_sw_desc *desc)
+{
+	struct al_crypto_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+
+	dev_dbg(to_dev(chan), "%s: src_nents=%d dst_nents=%d\n",
+		__func__, src_nents, dst_nents);
+
+	if (likely(rctx->src == rctx->dst)) {
+		dma_unmap_sg(to_dev(chan), rctx->src, src_nents, DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(to_dev(chan), rctx->src, src_nents, DMA_TO_DEVICE);
+		dma_unmap_sg(to_dev(chan), rctx->dst, dst_nents, DMA_FROM_DEVICE);
+	}
+
+	if (desc && desc->hal_xaction.enc_iv_in.len)
+		dma_unmap_single(to_dev(chan),
+				desc->hal_xaction.enc_iv_in.addr,
+				desc->hal_xaction.enc_iv_in.len,
+				DMA_TO_DEVICE);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static inline void al_crypto_update_stats(struct al_crypto_transaction *xaction,
+		struct al_crypto_chan *chan)
+{
+	if (xaction->dir == AL_CRYPT_ENCRYPT) {
+		AL_CRYPTO_STATS_INC(chan->stats_prep.skcipher_encrypt_reqs, 1);
+		AL_CRYPTO_STATS_INC(chan->stats_prep.skcipher_encrypt_bytes,
+				xaction->enc_in_len);
+	} else {
+		AL_CRYPTO_STATS_INC(chan->stats_prep.skcipher_decrypt_reqs, 1);
+		AL_CRYPTO_STATS_INC(chan->stats_prep.skcipher_decrypt_bytes,
+				xaction->enc_in_len);
+	}
+
+	if (xaction->enc_in_len <= 512)
+		AL_CRYPTO_STATS_INC(chan->stats_prep.skcipher_reqs_le512, 1);
+	else if ((xaction->enc_in_len > 512) && (xaction->enc_in_len <= 2048))
+		AL_CRYPTO_STATS_INC(chan->stats_prep.skcipher_reqs_512_2048, 1);
+	else if ((xaction->enc_in_len > 2048) && (xaction->enc_in_len <= 4096))
+		AL_CRYPTO_STATS_INC(chan->stats_prep.skcipher_reqs_2048_4096, 1);
+	else
+		AL_CRYPTO_STATS_INC(chan->stats_prep.skcipher_reqs_gt4096, 1);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static inline void al_crypto_prepare_xaction_buffers(struct skcipher_request *req,
+		struct al_crypto_sw_desc *desc)
+{
+	struct al_crypto_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+	struct al_crypto_transaction *xaction = &desc->hal_xaction;
+	int src_idx, dst_idx;
+	int i;
+
+	src_idx = 0;
+	dst_idx = 0;
+
+	sg_map_to_xaction_buffers(rctx->src, desc->src_bufs, rctx->cryptlen, &src_idx);
+
+	if (likely(rctx->src == rctx->dst)) {
+		for (i = 0; i < src_idx; i++)
+			desc->dst_bufs[i] = desc->src_bufs[i];
+		dst_idx = src_idx;
+	} else
+		sg_map_to_xaction_buffers(rctx->dst, desc->dst_bufs,
+				rctx->cryptlen, &dst_idx);
+
+	xaction->src_size = xaction->enc_in_len = rctx->cryptlen;
+	xaction->src.bufs = &desc->src_bufs[0];
+	xaction->src.num = src_idx;
+	xaction->dst.bufs = &desc->dst_bufs[0];
+	xaction->dst.num = dst_idx;
+}
+
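+/*
+ * The engine caps one transaction at U16_MAX bytes, so larger
+ * requests are carved into block-aligned slices with sg_split();
+ * the completion path keeps resubmitting until rctx->remaining
+ * reaches zero.
+ */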
"==" : "!=", sg_nents(req->src), sg_nents(req->dst)); + + if (req->cryptlen <= U16_MAX) { + rctx->cryptlen = rctx->remaining; + rctx->src = req->src; + rctx->dst = req->dst; + } else { + struct scatterlist *sg[1]; + size_t sizes[1]; + off_t skip; + + rctx->cryptlen = min(rctx->remaining, ALIGN_DOWN(U16_MAX, crypto_skcipher_blocksize(tfm))); + sizes[0] = rctx->cryptlen; + skip = req->cryptlen - rctx->remaining; + + dev_dbg(to_dev(chan), "%s: sizes=%d skip=%ld\n", + __func__, sizes[0], skip); + + rc = sg_split(req->src, 0, skip, + ARRAY_SIZE(sizes), sizes, sg, + NULL, gfp_flags); + if (rc) + goto fail_free_sgls; + + rctx->src = sg[0]; + + if (req->dst == req->src) { + rctx->dst = rctx->src; + } else { + rc = sg_split(req->dst, 0, skip, + ARRAY_SIZE(sizes), sizes, sg, + NULL, gfp_flags); + if (rc) + goto fail_free_sgls; + + rctx->dst = sg[0]; + } + } + + if (dir == AL_CRYPT_DECRYPT || ctx->sa.enc_type == AL_CRYPT_AES_CTR) { + if (rctx->src == rctx->dst) { + rctx->backup_iv = kmalloc(ivsize, gfp_flags); + if (!rctx->backup_iv) { + rc = -ENOMEM; + goto fail_free_sgls; + } + + BUG_ON(ctx->sa.enc_type != AL_CRYPT_AES_CTR && + rctx->cryptlen < ivsize); + + if (rctx->cryptlen >= ivsize) + scatterwalk_map_and_copy(rctx->backup_iv, rctx->src, + ALIGN_DOWN(rctx->cryptlen - ivsize, ivsize), + ivsize, 0); + } + } + + print_hex_dump_debug(KBUILD_MODNAME ": iv: ", + DUMP_PREFIX_OFFSET, 16, 1, req->iv, ivsize, false); + + src_nents = sg_nents_for_len(rctx->src, rctx->cryptlen); + if (rctx->dst != rctx->src) + dst_nents = sg_nents_for_len(rctx->dst, rctx->cryptlen); + else + dst_nents = src_nents; + + dev_dbg(to_dev(chan), + "%s: src_nents_for_len=%d dst_nents_for_len=%d cryptlen=%d (rctx->src %s rctx->dst) src_nents=%d dst_nents=%d\n", + __func__, src_nents, dst_nents, rctx->cryptlen, rctx->src == rctx->dst ? 
"==" : "!=", + sg_nents(rctx->src), sg_nents(rctx->dst)); + + if (unlikely(src_nents < 0)) { + dev_err(to_dev(chan), "Insufficient bytes (%d) in src S/G\n", rctx->cryptlen); + rc = src_nents; + goto fail_free_sgls; + } + if (unlikely(dst_nents < 0)) { + dev_err(to_dev(chan), "Insufficient bytes (%d) in dst S/G\n", rctx->cryptlen); + rc = dst_nents; + goto fail_free_sgls; + } + + /* Currently supported max sg chain length is + * AL_CRYPTO_OP_MAX_DATA_BUFS(12) which is minimum of descriptors left + * for data in a transaction: + * tx: 31(supported by HW) - 1(metadata) - 1(sa_in) - + * 1(enc_iv_in|auth_iv_in) - 1(auth_sign_in) = 27 + * rx: 31(supported by HW) - 1(sa_out) - 1(enc_iv_out|auth_iv_out) - + * 1(next_enc_iv_out) - 1(auth_sign_out) = 27 + */ + BUG_ON((src_nents > AL_CRYPTO_OP_MAX_BUFS) || + (dst_nents > AL_CRYPTO_OP_MAX_BUFS)); + + if (likely(rctx->src == rctx->dst)) { + dma_map_sg(to_dev(chan), rctx->src, src_nents, DMA_BIDIRECTIONAL); + } else { + dma_map_sg(to_dev(chan), rctx->src, src_nents, DMA_TO_DEVICE); + dma_map_sg(to_dev(chan), rctx->dst, dst_nents, DMA_FROM_DEVICE); + } + + if (likely(lock)) + spin_lock_bh(&chan->prep_lock); + + if (likely(al_crypto_get_sw_desc(chan, 1) == 0)) + idx = chan->head; + else { + rc = crypto_enqueue_request(&chan->sw_queue, &req->base); + goto fail_dma_unmap; + } + + chan->sw_desc_num_locked = 1; + chan->tx_desc_produced = 0; + + desc = al_crypto_get_ring_ent(chan, idx); + desc->req = (void *)req; + desc->req_type = AL_CRYPTO_REQ_SKCIPHER; + desc->src_nents = src_nents; + desc->dst_nents = dst_nents; + + /* prepare hal transaction */ + xaction = &desc->hal_xaction; + memset(xaction, 0, sizeof(struct al_crypto_transaction)); + xaction->dir = dir; + + al_crypto_prepare_xaction_buffers(req, desc); + + if ((ctx->sa.enc_type != AL_CRYPT_AES_ECB) && + (ctx->sa.enc_type != AL_CRYPT_DES_ECB) && + (ctx->sa.enc_type != AL_CRYPT_TRIPDES_ECB)) { + xaction->enc_iv_in.addr = dma_map_single(to_dev(chan), + req->iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(to_dev(chan), xaction->enc_iv_in.addr)) { + rc = -ENOMEM; + goto fail_dma_unmap; + } + xaction->enc_iv_in.len = ivsize; + + xaction->enc_next_iv_out.addr = ctx->iv_dma_addr; + xaction->enc_next_iv_out.len = ivsize; + } + + if (!ctx->cache_state.cached) { + xaction->sa_indx = al_crypto_cache_replace_lru(chan, &ctx->cache_state, NULL); + xaction->sa_in.addr = ctx->hw_sa_dma_addr; + xaction->sa_in.len = sizeof(struct al_crypto_hw_sa); + } else { + al_crypto_cache_update_lru(chan, &ctx->cache_state); + xaction->sa_indx = ctx->cache_state.idx; + } + + xaction->flags = AL_SSM_INTERRUPT; + + al_crypto_update_stats(xaction, chan); + + /* send crypto transaction to engine */ + rc = al_crypto_dma_prepare(chan->hal_crypto, chan->idx, &desc->hal_xaction); + if (unlikely(rc != 0)) + goto fail_dma_unmap; + + chan->tx_desc_produced += desc->hal_xaction.tx_descs_count; + + dev_dbg(to_dev(chan), + "%s: chan->idx=%d dir=%d ctx->sa.enc_type=%d ctx->cache_state.cached=%d tx_descs_count=%d\n", + __func__, chan->idx, dir, ctx->sa.enc_type, ctx->cache_state.cached, + desc->hal_xaction.tx_descs_count); + + al_crypto_tx_submit(chan); + + if (likely(lock)) + spin_unlock_bh(&chan->prep_lock); + + return -EINPROGRESS; + +fail_dma_unmap: + + al_crypto_dma_unmap(chan, req, src_nents, dst_nents, desc); + + if (likely(lock)) + spin_unlock_bh(&chan->prep_lock); + +fail_free_sgls: + if (rctx->src != NULL && rctx->src != req->src) { + kfree(rctx->src); + rctx->src = NULL; + } + if (rctx->dst != NULL && rctx->dst != req->dst) { + 
+	if (rctx->src != NULL && rctx->src != req->src) {
+		kfree(rctx->src);
+		rctx->src = NULL;
+	}
+	if (rctx->dst != NULL && rctx->dst != req->dst) {
+		kfree(rctx->dst);
+		rctx->dst = NULL;
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static int al_crypto_encrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct al_crypto_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct al_crypto_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+	struct al_crypto_chan *chan = ctx->chan;
+	unsigned int blksize = crypto_skcipher_blocksize(tfm);
+
+	dev_dbg(to_dev(chan), "%s: cryptlen=%d\n", __func__, req->cryptlen);
+	print_hex_dump_debug(KBUILD_MODNAME ": iv: ", DUMP_PREFIX_OFFSET,
+			16, 1, req->iv, crypto_skcipher_ivsize(tfm), false);
+
+	if (req->cryptlen < blksize)
+		return -EINVAL;
+
+	rctx->dir = AL_CRYPT_ENCRYPT;
+	rctx->remaining = req->cryptlen;
+
+	return al_crypto_do_crypt(req, true);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static int al_crypto_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct al_crypto_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct al_crypto_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+	struct al_crypto_chan *chan = ctx->chan;
+	unsigned int blksize = crypto_skcipher_blocksize(tfm);
+
+	dev_dbg(to_dev(chan), "%s: cryptlen=%d\n", __func__, req->cryptlen);
+	print_hex_dump_debug(KBUILD_MODNAME ": iv: ", DUMP_PREFIX_OFFSET,
+			16, 1, req->iv, crypto_skcipher_ivsize(tfm), false);
+
+	if (req->cryptlen < blksize)
+		return -EINVAL;
+
+	rctx->dir = AL_CRYPT_DECRYPT;
+	rctx->remaining = req->cryptlen;
+
+	return al_crypto_do_crypt(req, true);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static struct al_crypto_alg *al_crypto_alg_alloc(struct al_crypto_device *device,
+		struct al_crypto_skcipher_template *template)
+{
+	struct al_crypto_alg *t_alg;
+	struct skcipher_alg *alg;
+
+	t_alg = kzalloc(sizeof(struct al_crypto_alg), GFP_KERNEL);
+	if (!t_alg) {
+		dev_err(&device->pdev->dev, "failed to allocate t_alg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	alg = &t_alg->alg;
+	*alg = template->alg;
+
+	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		template->driver_name);
+
+	alg->base.cra_module = THIS_MODULE;
+	alg->base.cra_priority = AL_CRYPTO_CRA_PRIORITY;
+	alg->base.cra_blocksize = template->blocksize;
+	alg->base.cra_alignmask = 0;
+	alg->base.cra_ctxsize = sizeof(struct al_crypto_ctx);
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+	alg->chunksize = alg->base.cra_blocksize;
+
+	t_alg->enc_type = template->enc_type;
+	t_alg->sa_op = template->sa_op;
+	t_alg->device = device;
+
+	return t_alg;
+}
+
+/******************************************************************************
+ *****************************************************************************/
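+/*
+ * For the CBC-style modes the next IV is the last ciphertext block:
+ * read from dst after encryption and from src after decryption,
+ * where in-place operation uses the backup taken before submission.
+ */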
+static void al_crypto_req_iv_out(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct al_crypto_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+
+	if (rctx->dir == AL_CRYPT_ENCRYPT) {
+		BUG_ON(rctx->cryptlen < ivsize);
+		scatterwalk_map_and_copy(req->iv, rctx->dst,
+				rctx->cryptlen - ivsize, ivsize, 0);
+	} else {
+		if (rctx->src == rctx->dst) {
+			memcpy(req->iv, rctx->backup_iv, ivsize);
+			kfree(rctx->backup_iv);
+		} else {
+			BUG_ON(rctx->cryptlen < ivsize);
+			scatterwalk_map_and_copy(req->iv, rctx->src,
+					rctx->cryptlen - ivsize,
+					ivsize, 0);
+		}
+	}
+}
+
+/******************************************************************************
+ *****************************************************************************/
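+/*
+ * CTR mode: the engine does not hand back the next counter, so it is
+ * reconstructed from the last full block: keystream = pt ^ ct,
+ * counter = AES-decrypt(keystream), next IV = counter + 1 (with one
+ * extra increment when a partial trailing block consumed a counter).
+ */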
+static void al_crypto_req_aes_ctr_iv_out(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct al_crypto_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct al_crypto_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+	unsigned int keylen;
+	u8 pt[AES_BLOCK_SIZE];
+
+	BUG_ON(al_crypto_sa_aes_ksize_to_keylen(ctx->sa.aes_ksize, &keylen));
+
+	if (rctx->cryptlen >= ivsize) {
+		if (rctx->src == rctx->dst) {
+			memcpy(pt, rctx->backup_iv, ivsize);
+			kfree(rctx->backup_iv);
+		} else {
+			scatterwalk_map_and_copy(pt, rctx->src,
+					ALIGN_DOWN(rctx->cryptlen - ivsize, ivsize),
+					ivsize, 0);
+		}
+
+		scatterwalk_map_and_copy(req->iv, rctx->dst,
+				ALIGN_DOWN(rctx->cryptlen - ivsize, ivsize),
+				ivsize, 0);
+
+		print_hex_dump_debug(KBUILD_MODNAME ": aes-ctr pt: ",
+				DUMP_PREFIX_OFFSET, 16, 1, pt, ivsize, false);
+		print_hex_dump_debug(KBUILD_MODNAME ": aes-ctr ct: ",
+				DUMP_PREFIX_OFFSET, 16, 1, req->iv, ivsize, false);
+
+		crypto_xor(req->iv, pt, ivsize);
+		aes_expandkey(&ctx->aes_key, ctx->sa.enc_key, keylen);
+		aes_decrypt(&ctx->aes_key, req->iv, req->iv);
+		crypto_inc(req->iv, ivsize);
+	}
+
+	if (rctx->cryptlen & (ivsize - 1))
+		crypto_inc(req->iv, ivsize);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+int al_crypto_skcipher_process_queue(struct al_crypto_chan *chan)
+{
+	struct crypto_async_request *async_req, *backlog;
+	struct skcipher_request *req;
+	int err = 0;
+
+	spin_lock_bh(&chan->prep_lock);
+
+	while (al_crypto_ring_space(chan) > 0) {
+		backlog = crypto_get_backlog(&chan->sw_queue);
+		async_req = crypto_dequeue_request(&chan->sw_queue);
+
+		dev_dbg(to_dev(chan), "%s: backlog=%d async_req=%d\n",
+			__func__, !!backlog, !!async_req);
+
+		if (!async_req)
+			break;
+
+		if (backlog)
+			backlog->complete(backlog, -EINPROGRESS);
+
+		req = container_of(async_req, struct skcipher_request, base);
+
+		err = al_crypto_do_crypt(req, false);
+		if (err != -EINPROGRESS)
+			break;
+	}
+
+	spin_unlock_bh(&chan->prep_lock);
+
+	return err;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Cleanup single skcipher request - invoked from cleanup tasklet (interrupt
+ * handler)
+ */
+void al_crypto_skcipher_cleanup_single(struct al_crypto_chan *chan,
+		struct al_crypto_sw_desc *desc,
+		uint32_t comp_status)
+{
+	struct skcipher_request *req = (struct skcipher_request *)desc->req;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct al_crypto_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct al_crypto_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+	int rc;
+
+	dev_dbg(to_dev(chan), "%s: chan->idx=%d comp_status=%x\n",
+		__func__, chan->idx, comp_status);
+
+	al_crypto_dma_unmap(chan, req, desc->src_nents, desc->dst_nents, desc);
+
+	if (ctx->sa.enc_type != AL_CRYPT_AES_CTR)
+		al_crypto_req_iv_out(req);
+	else
+		al_crypto_req_aes_ctr_iv_out(req);
+
+	print_hex_dump_debug(KBUILD_MODNAME ": out iv: ",
+			DUMP_PREFIX_OFFSET, 16, 1, req->iv, ivsize, false);
+
+	if (rctx->src != NULL && rctx->src != req->src) {
+		kfree(rctx->src);
+		rctx->src = NULL;
+	}
+	if (rctx->dst != NULL && rctx->dst != req->dst) {
+		kfree(rctx->dst);
+		rctx->dst = NULL;
+	}
+
+	rctx->remaining -= rctx->cryptlen;
+
+	if (rctx->remaining > 0) {
+		rc = al_crypto_do_crypt(req, false);
+		if (rc != -EINPROGRESS) {
+			dev_err(to_dev(chan),
+				"al_crypto_do_crypt failed with %d\n", rc);
+			req->base.complete(&req->base, rc);
+		}
+		return;
+	}
+
+	req->base.complete(&req->base, 0);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+int al_crypto_skcipher_init(struct al_crypto_device *device)
+{
+	int err = 0;
+	int i;
+
+	INIT_LIST_HEAD(&device->skcipher_list);
+
+	atomic_set(&device->tfm_count, -1);
+
+	/* register crypto algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		struct al_crypto_alg *t_alg;
+
+		t_alg = al_crypto_alg_alloc(device, &driver_algs[i]);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			dev_warn(&device->pdev->dev,
+				"%s alg allocation failed with %d\n",
+				driver_algs[i].driver_name, err);
+			continue;
+		}
+
+		err = crypto_register_skciphers(&t_alg->alg, 1);
+		if (err) {
+			dev_warn(&device->pdev->dev,
+				"%s alg registration failed with %d\n",
+				t_alg->alg.base.cra_driver_name, err);
+			kfree(t_alg);
+		} else
+			list_add_tail(&t_alg->entry, &device->skcipher_list);
+	}
+
+	if (!list_empty(&device->skcipher_list))
+		dev_info(&device->pdev->dev,
+			"skcipher algorithms registered in /proc/crypto\n");
+
+	return err;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+void al_crypto_skcipher_terminate(struct al_crypto_device *device)
+{
+	struct al_crypto_alg *t_alg, *n;
+
+	if (!device->skcipher_list.next)
+		return;
+
+	list_for_each_entry_safe(t_alg, n, &device->skcipher_list, entry) {
+		crypto_unregister_skciphers(&t_alg->alg, 1);
+		list_del(&t_alg->entry);
+		kfree(t_alg);
+	}
+}
diff --git a/target/linux/alpine/files/drivers/crypto/al/al_crypto_sysfs.c b/target/linux/alpine/files/drivers/crypto/al/al_crypto_sysfs.c
new file mode 100644
index 00000000000000..024e3586011c06
--- /dev/null
+++ b/target/linux/alpine/files/drivers/crypto/al/al_crypto_sysfs.c
@@ -0,0 +1,539 @@
+/*
+ * Annapurna Labs Crypto Linux driver - sysfs support
+ * Copyright(c) 2013 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+
+#include <linux/slab.h>
+
+#include "al_crypto.h"
+
+#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
+
+#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS
+static void al_crypto_release_channel(struct kobject *kobj)
+{
+	struct al_crypto_chan *chan =
+		container_of(kobj, struct al_crypto_chan, kobj);
+
+	kfree(chan);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+struct al_crypto_chan_attr {
+	struct attribute attr;
+	size_t offset;
+	ssize_t (*show)(struct al_crypto_chan *chan, size_t offset, char *buf);
+	ssize_t (*store)(struct al_crypto_chan *chan, size_t offset,
+			const char *buf, size_t size);
+};
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t al_crypto_chan_attr_show(struct kobject *kobj,
+		struct attribute *attr, char *buf)
+{
+	struct al_crypto_chan *chan =
+		container_of(kobj, struct al_crypto_chan, kobj);
+	struct al_crypto_chan_attr *chan_attr =
+		container_of(attr, struct al_crypto_chan_attr, attr);
+	ssize_t ret = 0;
+
+	if (chan_attr->show)
+		ret = chan_attr->show(chan, chan_attr->offset, buf);
+
+	return ret;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t al_crypto_chan_attr_store(struct kobject *kobj,
+		struct attribute *attr, const char *buf, size_t size)
+{
+	struct al_crypto_chan *chan =
+		container_of(kobj, struct al_crypto_chan, kobj);
+	struct al_crypto_chan_attr *chan_attr =
+		container_of(attr, struct al_crypto_chan_attr, attr);
+	ssize_t ret = 0;
+
+	if (chan_attr->store)
+		ret = chan_attr->store(chan, chan_attr->offset, buf, size);
+
+	return ret;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static const struct sysfs_ops al_crypto_chan_sysfs_ops = {
+	.show = al_crypto_chan_attr_show,
+	.store = al_crypto_chan_attr_store,
+};
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t al_crypto_chan_rd_stats_prep(
+	struct al_crypto_chan *chan,
+	size_t offset,
+	char *buf)
+{
+	uint64_t val;
+	ssize_t size;
+
+	spin_lock_bh(&chan->prep_lock);
+
+	val = *(uint64_t *)(((uint8_t *)&chan->stats_prep) + offset);
+
+	spin_unlock_bh(&chan->prep_lock);
+
+	size = sprintf(buf, "%llu\n", val);
+
+	return size;
+}
+
+static ssize_t al_crypto_chan_rd_stats_comp(
+	struct al_crypto_chan *chan,
+	size_t offset,
+	char *buf)
+{
+	uint64_t val;
+	ssize_t size;
+
+	spin_lock_bh(&chan->cleanup_lock);
+
+	val = *(uint64_t *)(((uint8_t *)&chan->stats_comp) + offset);
+
+	spin_unlock_bh(&chan->cleanup_lock);
+
+	size = sprintf(buf, "%llu\n", val);
+
+	return size;
+}
+
+static ssize_t al_crypto_chan_rd_stats_gen(
+	struct al_crypto_chan *chan,
+	size_t offset,
+	char *buf)
+{
+	uint64_t val;
+	ssize_t size;
+
+	val = *(uint64_t *)(((uint8_t *)&chan->stats_gen) + offset);
+
+	size = sprintf(buf, "%llu\n", val);
+
+	return size;
+}
+
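+/*
+ * Each attribute records the offset of its counter inside the
+ * per-group stats struct; the show handler adds that offset to the
+ * group base and reads a u64, so no per-counter code is needed.
+ */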
+#define al_crypto_chan_init_attr(_name, _group) \
+static struct al_crypto_chan_attr al_crypto_chan_##_name = { \
+	.attr = { .name = __stringify(_name), .mode = S_IRUGO }, \
+	.offset = offsetof(struct al_crypto_chan_stats_##_group, _name), \
+	.show = al_crypto_chan_rd_stats_##_group, \
+	.store = NULL, \
+}
+
+/* Channel attrs */
+al_crypto_chan_init_attr(skcipher_encrypt_reqs, prep);
+al_crypto_chan_init_attr(skcipher_encrypt_bytes, prep);
+al_crypto_chan_init_attr(skcipher_decrypt_reqs, prep);
+al_crypto_chan_init_attr(skcipher_decrypt_bytes, prep);
+al_crypto_chan_init_attr(aead_encrypt_hash_reqs, prep);
+al_crypto_chan_init_attr(aead_encrypt_bytes, prep);
+al_crypto_chan_init_attr(aead_hash_bytes, prep);
+al_crypto_chan_init_attr(aead_decrypt_validate_reqs, prep);
+al_crypto_chan_init_attr(aead_decrypt_bytes, prep);
+al_crypto_chan_init_attr(aead_validate_bytes, prep);
+al_crypto_chan_init_attr(ahash_reqs, prep);
+al_crypto_chan_init_attr(ahash_bytes, prep);
+al_crypto_chan_init_attr(crc_reqs, prep);
+al_crypto_chan_init_attr(crc_bytes, prep);
+al_crypto_chan_init_attr(cache_misses, prep);
+al_crypto_chan_init_attr(skcipher_reqs_le512, prep);
+al_crypto_chan_init_attr(skcipher_reqs_512_2048, prep);
+al_crypto_chan_init_attr(skcipher_reqs_2048_4096, prep);
+al_crypto_chan_init_attr(skcipher_reqs_gt4096, prep);
+al_crypto_chan_init_attr(aead_reqs_le512, prep);
+al_crypto_chan_init_attr(aead_reqs_512_2048, prep);
+al_crypto_chan_init_attr(aead_reqs_2048_4096, prep);
+al_crypto_chan_init_attr(aead_reqs_gt4096, prep);
+al_crypto_chan_init_attr(ahash_reqs_le512, prep);
+al_crypto_chan_init_attr(ahash_reqs_512_2048, prep);
+al_crypto_chan_init_attr(ahash_reqs_2048_4096, prep);
+al_crypto_chan_init_attr(ahash_reqs_gt4096, prep);
+al_crypto_chan_init_attr(crc_reqs_le512, prep);
+al_crypto_chan_init_attr(crc_reqs_512_2048, prep);
+al_crypto_chan_init_attr(crc_reqs_2048_4096, prep);
+al_crypto_chan_init_attr(crc_reqs_gt4096, prep);
+al_crypto_chan_init_attr(redundant_int_cnt, comp);
+al_crypto_chan_init_attr(max_active_descs, comp);
+al_crypto_chan_init_attr(skcipher_tfms, gen);
+al_crypto_chan_init_attr(aead_tfms, gen);
+al_crypto_chan_init_attr(ahash_tfms, gen);
+al_crypto_chan_init_attr(crc_tfms, gen);
+
+static struct attribute *al_crypto_chan_default_attrs[] = {
+	&al_crypto_chan_skcipher_encrypt_reqs.attr,
+	&al_crypto_chan_skcipher_encrypt_bytes.attr,
+	&al_crypto_chan_skcipher_decrypt_reqs.attr,
+	&al_crypto_chan_skcipher_decrypt_bytes.attr,
+	&al_crypto_chan_aead_encrypt_hash_reqs.attr,
+	&al_crypto_chan_aead_encrypt_bytes.attr,
+	&al_crypto_chan_aead_hash_bytes.attr,
+	&al_crypto_chan_aead_decrypt_validate_reqs.attr,
+	&al_crypto_chan_aead_decrypt_bytes.attr,
+	&al_crypto_chan_aead_validate_bytes.attr,
+	&al_crypto_chan_ahash_reqs.attr,
+	&al_crypto_chan_ahash_bytes.attr,
+	&al_crypto_chan_cache_misses.attr,
+	&al_crypto_chan_skcipher_reqs_le512.attr,
+	&al_crypto_chan_skcipher_reqs_512_2048.attr,
+	&al_crypto_chan_skcipher_reqs_2048_4096.attr,
+	&al_crypto_chan_skcipher_reqs_gt4096.attr,
+	&al_crypto_chan_aead_reqs_le512.attr,
+	&al_crypto_chan_aead_reqs_512_2048.attr,
+	&al_crypto_chan_aead_reqs_2048_4096.attr,
+	&al_crypto_chan_aead_reqs_gt4096.attr,
+	&al_crypto_chan_ahash_reqs_le512.attr,
+	&al_crypto_chan_ahash_reqs_512_2048.attr,
+	&al_crypto_chan_ahash_reqs_2048_4096.attr,
+	&al_crypto_chan_ahash_reqs_gt4096.attr,
+
+	&al_crypto_chan_redundant_int_cnt.attr,
+	&al_crypto_chan_max_active_descs.attr,
+
+	&al_crypto_chan_skcipher_tfms.attr,
+	&al_crypto_chan_aead_tfms.attr,
+	&al_crypto_chan_ahash_tfms.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(al_crypto_chan_default);
+
+static struct attribute *al_crypto_crc_chan_default_attrs[] = {
+	&al_crypto_chan_crc_reqs.attr,
+	&al_crypto_chan_crc_bytes.attr,
+	&al_crypto_chan_cache_misses.attr,
+	&al_crypto_chan_crc_reqs_le512.attr,
+	&al_crypto_chan_crc_reqs_512_2048.attr,
+	&al_crypto_chan_crc_reqs_2048_4096.attr,
+	&al_crypto_chan_crc_reqs_gt4096.attr,
+
+	&al_crypto_chan_redundant_int_cnt.attr,
+	&al_crypto_chan_max_active_descs.attr,
+
+	&al_crypto_chan_crc_tfms.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(al_crypto_crc_chan_default);
+
+static struct kobj_type chan_ktype = {
+	.sysfs_ops = &al_crypto_chan_sysfs_ops,
+	.release = al_crypto_release_channel,
+	.default_groups = al_crypto_chan_default_groups,
+};
+
+static struct kobj_type crc_chan_ktype = {
+	.sysfs_ops = &al_crypto_chan_sysfs_ops,
+	.release = al_crypto_release_channel,
+	.default_groups = al_crypto_crc_chan_default_groups,
+};
+
+enum udma_dump_type {
+	UDMA_DUMP_M2S_REGS,
+	UDMA_DUMP_M2S_Q_STRUCT,
+	UDMA_DUMP_M2S_Q_POINTERS,
+	UDMA_DUMP_S2M_REGS,
+	UDMA_DUMP_S2M_Q_STRUCT,
+	UDMA_DUMP_S2M_Q_POINTERS
+};
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t rd_udma_dump(
+	struct device *dev,
+	struct device_attribute *attr,
+	char *buf)
+{
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	enum udma_dump_type dump_type = (enum udma_dump_type)ea->var;
+	ssize_t rc = 0;
+
+	switch (dump_type) {
+	case UDMA_DUMP_M2S_REGS:
+	case UDMA_DUMP_S2M_REGS:
+		rc = sprintf(
+			buf,
+			"Write mask to dump corresponding udma regs\n");
+		break;
+	case UDMA_DUMP_M2S_Q_STRUCT:
+	case UDMA_DUMP_S2M_Q_STRUCT:
+		rc = sprintf(
+			buf,
+			"Write q num to dump corresponding q struct\n");
+		break;
+	case UDMA_DUMP_M2S_Q_POINTERS:
+	case UDMA_DUMP_S2M_Q_POINTERS:
+		rc = sprintf(
+			buf,
+			"Write q num (in hex) and add 1 for submission ring,"
+			" for ex:\n"
+			"0 for completion ring of q 0\n"
+			"10 for submission ring of q 0\n");
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t wr_udma_dump(
+	struct device *dev,
+	struct device_attribute *attr,
+	const char *buf,
+	size_t count)
+{
+	int err;
+	int q_id;
+	unsigned long val;
+	struct al_udma *dma;
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	enum udma_dump_type dump_type = (enum udma_dump_type)ea->var;
+	enum al_udma_ring_type ring_type = AL_RING_COMPLETION;
+	struct al_crypto_device *device = dev_get_drvdata(dev);
+
+	err = kstrtoul(buf, 16, &val);
+	if (err < 0)
+		return err;
+
+	switch (dump_type) {
+	case UDMA_DUMP_M2S_REGS:
+		al_ssm_dma_handle_get(&device->hal_crypto, UDMA_TX, &dma);
+		al_udma_regs_print(dma, val);
+		break;
+	case UDMA_DUMP_S2M_REGS:
+		al_ssm_dma_handle_get(&device->hal_crypto, UDMA_RX, &dma);
+		al_udma_regs_print(dma, val);
+		break;
+	case UDMA_DUMP_M2S_Q_STRUCT:
+		al_ssm_dma_handle_get(&device->hal_crypto, UDMA_TX, &dma);
+		al_udma_q_struct_print(dma, val);
+		break;
+	case UDMA_DUMP_S2M_Q_STRUCT:
+		al_ssm_dma_handle_get(&device->hal_crypto, UDMA_RX, &dma);
+		al_udma_q_struct_print(dma, val);
+		break;
+	case UDMA_DUMP_M2S_Q_POINTERS:
+		if (val & 0x10)
+			ring_type = AL_RING_SUBMISSION;
+		q_id = val & 0xf;
+		al_ssm_dma_handle_get(&device->hal_crypto, UDMA_TX, &dma);
+		al_udma_ring_print(dma, q_id, ring_type);
+		break;
+	case UDMA_DUMP_S2M_Q_POINTERS:
+		if (val & 0x10)
+			ring_type = AL_RING_SUBMISSION;
+		q_id = val & 0xf;
+		al_ssm_dma_handle_get(&device->hal_crypto, UDMA_RX, &dma);
+		al_udma_ring_print(dma, q_id, ring_type);
+		break;
+	default:
+		break;
+	}
+
+	return count;
+}
+
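+/*
+ * dev_ext_attribute.var carries the dump type, so the single
+ * show/store pair above services all six udma debug files below.
+ */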
+#define UDMA_DUMP_PREP_ATTR(_name, _type) {\
+	__ATTR(udma_dump_##_name, 0660, rd_udma_dump, wr_udma_dump),\
+	(void *)_type }
+
+/* Device attrs - udma debug */
+static struct dev_ext_attribute dev_attr_udma_debug[] = {
+	UDMA_DUMP_PREP_ATTR(m2s_regs, UDMA_DUMP_M2S_REGS),
+	UDMA_DUMP_PREP_ATTR(m2s_q_struct, UDMA_DUMP_M2S_Q_STRUCT),
+	UDMA_DUMP_PREP_ATTR(m2s_q_pointers, UDMA_DUMP_M2S_Q_POINTERS),
+	UDMA_DUMP_PREP_ATTR(s2m_regs, UDMA_DUMP_S2M_REGS),
+	UDMA_DUMP_PREP_ATTR(s2m_q_struct, UDMA_DUMP_S2M_Q_STRUCT),
+	UDMA_DUMP_PREP_ATTR(s2m_q_pointers, UDMA_DUMP_S2M_Q_POINTERS)
+};
+#endif /* CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS */
+
+enum al_crypto_config_type {
+	CONFIG_INT_MODERATION
+};
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t al_crypto_rd_config(
+	struct device *dev,
+	struct device_attribute *attr,
+	char *buf)
+{
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	enum al_crypto_config_type config_type =
+		(enum al_crypto_config_type)ea->var;
+	struct al_crypto_device *device = dev_get_drvdata(dev);
+	ssize_t rc = 0;
+
+	switch (config_type) {
+	case CONFIG_INT_MODERATION:
+		rc = sprintf(buf, "%d\n", al_crypto_get_int_moderation(device));
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t al_crypto_wr_config(
+	struct device *dev,
+	struct device_attribute *attr,
+	const char *buf,
+	size_t count)
+{
+	int err;
+	unsigned long val;
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	enum al_crypto_config_type config_type =
+		(enum al_crypto_config_type)ea->var;
+	struct al_crypto_device *device = dev_get_drvdata(dev);
+
+	err = kstrtoul(buf, 10, &val);
+	if (err < 0)
+		return err;
+
+	switch (config_type) {
+	case CONFIG_INT_MODERATION:
+		al_crypto_set_int_moderation(device, val);
+		break;
+	default:
+		break;
+	}
+
+	return count;
+}
+
+#define CONFIG_PREP_ATTR(_name, _type) {\
+	__ATTR(_name, 0660,\
+		al_crypto_rd_config, al_crypto_wr_config),\
+	(void *)_type }
+
+/* Device attrs - config */
+static struct dev_ext_attribute dev_attr_config[] = {
+	CONFIG_PREP_ATTR(int_moderation, CONFIG_INT_MODERATION),
+};
+
+/******************************************************************************
+ *****************************************************************************/
+void al_crypto_free_channel(struct al_crypto_chan *chan)
+{
+#ifndef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS
+	kfree(chan);
+#endif
+}
+
+/******************************************************************************
+ *****************************************************************************/
+int al_crypto_sysfs_init(
+	struct al_crypto_device *device)
+{
+	int rc = 0;
+	struct device *dev = &device->pdev->dev;
+	int i;
+
+#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS
+
+	device->channels_kset =
+		kset_create_and_add("channels", NULL,
+				&device->pdev->dev.kobj);
+	if (!device->channels_kset)
+		return -ENOMEM;
+
+	for (i = 0; i < device->num_channels; i++) {
+		struct al_crypto_chan *chan = device->channels[i];
+		chan->kobj.kset = device->channels_kset;
+		if (chan->type == AL_CRYPT_AUTH_Q)
+			rc = kobject_init_and_add(&chan->kobj, &chan_ktype,
+					NULL, "chan%d", i);
+		else
+			rc = kobject_init_and_add(&chan->kobj,
+					&crc_chan_ktype,
+					NULL, "chan%d", i);
+		if (rc) {
+			int j;
+			for (j = 0; j <= i; j++)
+				kobject_put(&device->channels[j]->kobj);
+			kset_unregister(device->channels_kset);
+			for (j = i+1; j < device->num_channels; j++)
+				kfree(device->channels[j]);
+			return -ENOMEM;
+		}
+
+		kobject_uevent(&chan->kobj, KOBJ_ADD);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dev_attr_udma_debug); i++)
+		rc = sysfs_create_file(
+			&dev->kobj,
+			&dev_attr_udma_debug[i].attr.attr);
+#endif
+	for (i = 0; i < ARRAY_SIZE(dev_attr_config); i++)
+		rc = sysfs_create_file(
+			&dev->kobj,
+			&dev_attr_config[i].attr.attr);
+
+	return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+void al_crypto_sysfs_terminate(
+	struct al_crypto_device *device)
+{
+	int i;
+	struct device *dev = &device->pdev->dev;
+
+	for (i = 0; i < ARRAY_SIZE(dev_attr_config); i++)
+		sysfs_remove_file(
+			&dev->kobj,
+			&dev_attr_config[i].attr.attr);
+
+#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS
+
+	for (i = 0; i < ARRAY_SIZE(dev_attr_udma_debug); i++)
+		sysfs_remove_file(
+			&dev->kobj,
+			&dev_attr_udma_debug[i].attr.attr);
+
+	for (i = 0; i < device->num_channels; i++)
+		kobject_put(&device->channels[i]->kobj);
+	kset_unregister(device->channels_kset);
+#endif
+}
diff --git a/target/linux/alpine/files/drivers/crypto/al/al_hal_ssm_crc_memcpy.c b/target/linux/alpine/files/drivers/crypto/al/al_hal_ssm_crc_memcpy.c
new file mode 100644
index 00000000000000..9eefd1dee60b6e
--- /dev/null
+++ b/target/linux/alpine/files/drivers/crypto/al/al_hal_ssm_crc_memcpy.c
@@ -0,0 +1,668 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    *	Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *	Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in
+	the documentation and/or other materials provided with the
+	distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "al_hal_ssm_crc_memcpy.h"
+#include "al_hal_udma.h"
+
+/** How many descriptors to save between head and tail in case of
+ * wrap around.
+ */
+#define AL_CRC_MEMCPY_DESC_RES 0
+
+#define CRC_MEMCPY_DEBUG
+
+#ifdef CRC_MEMCPY_DEBUG
+#define al_debug al_dbg
+#else
+#define al_debug(...)
+#endif
+
+#ifdef CRC_MEMCPY_DEBUG
+void al_print_desc(union al_udma_desc *desc)
+{
+	al_dbg("crc_memcpy: Desc: %08x %08x %08x %08x\n",
+		desc->tx_meta.len_ctrl, desc->tx_meta.meta_ctrl,
+		desc->tx_meta.meta1, desc->tx_meta.meta2);
+}
+
+static
+void al_print_crc_xaction(struct al_crc_transaction *xaction)
+{
+	unsigned int i;
+
+	al_dbg("crc_memcpy: CRC Transaction debug\n");
+	al_dbg(" CRC TYPE: ");
+	switch (xaction->crcsum_type) {
+	case AL_CRC_CHECKSUM_NULL:
+		al_dbg(" NULL\n");
+		break;
+	case AL_CRC_CHECKSUM_CRC32:
+		al_dbg(" CRC32\n");
+		break;
+	case AL_CRC_CHECKSUM_CRC32C:
+		al_dbg(" CRC32C\n");
+		break;
+	case AL_CRC_CHECKSUM_CKSM16:
+		al_dbg(" CKSM16\n");
+		break;
+	}
+	al_dbg(" Flags %d\n", xaction->flags);
+
+	al_dbg("-SRC num of buffers %d\n",
+		xaction->src.num);
+	for (i = 0 ; i < xaction->src.num; i++)
+		al_dbg(" addr 0x%016llx len %d\n",
+			(unsigned long long)xaction->src.bufs[i].addr,
+			xaction->src.bufs[i].len);
+
+	al_dbg("-DST num of buffers %d\n",
+		xaction->dst.num);
+	for (i = 0 ; i < xaction->dst.num; i++)
+		al_dbg(" addr 0x%016llx len %d\n",
+			(unsigned long long)xaction->dst.bufs[i].addr,
+			xaction->dst.bufs[i].len);
+
+	al_dbg(" CRC IV IN size: %d, addr 0x%016llx\n",
+		xaction->crc_iv_in.len,
+		(unsigned long long)xaction->crc_iv_in.addr);
+	al_dbg(" Cached CRC index %d\n", xaction->cached_crc_indx);
+	al_dbg(" Save CRC IV in cache: %d\n", xaction->save_crc_iv);
+	al_dbg(" Store CRC Out in cache: %d\n", xaction->st_crc_out);
+	al_dbg(" CRC Expected size: %d, addr 0x%016llx\n",
+		xaction->crc_expected.len,
+		(unsigned long long)xaction->crc_expected.addr);
+	al_dbg(" CRC OUT size: %d, addr 0x%016llx\n",
+		xaction->crc_out.len,
+		(unsigned long long)xaction->crc_out.addr);
+	al_dbg(" SWAP flags %x\n", xaction->swap_flags);
+	al_dbg(" XOR Valid: %d XOR in: %x XOR out: %x\n",
+		xaction->xor_valid, xaction->in_xor, xaction->res_xor);
+}
+#else
+#define al_print_desc(x)
+#define al_print_crc_xaction(x)
+#endif
+
+/**
+ * Fill one rx submission descriptor
+ *
+ * @param rx_udma_q rx udma handle
+ * @param flags flags for the descriptor
+ * @param buf destination buffer
+ * @param vmid virtual machine ID
+ */
+static INLINE
+void al_crc_memcpy_prep_one_rx_desc(struct al_udma_q *rx_udma_q,
+	uint32_t flags, struct al_buf *buf, uint16_t vmid)
+{
+	uint64_t vmid_shifted = ((uint64_t)vmid) << AL_UDMA_DESC_VMID_SHIFT;
+	uint32_t flags_len = flags;
+	union al_udma_desc *rx_desc;
+	uint32_t ring_id;
+
+	rx_desc = al_udma_desc_get(rx_udma_q);
+	/* get ring id */
+	ring_id = al_udma_ring_id_get(rx_udma_q)
+		<< AL_M2S_DESC_RING_ID_SHIFT;
+
+	flags_len |= ring_id;
+
+	flags_len |= buf->len & AL_M2S_DESC_LEN_MASK;
+	rx_desc->rx.len_ctrl = swap32_to_le(flags_len);
+	rx_desc->rx.buf1_ptr = swap64_to_le(buf->addr | vmid_shifted);
+	al_print_desc(rx_desc);
+}
+
+/**
+ * Fill one tx submission descriptor
+ *
+ * @param tx_udma_q tx udma handle
+ * @param flags flags for the descriptor
+ * @param meta metadata word1
+ * @param buf source buffer
+ * @param vmid virtual machine ID
+ */
+static INLINE void al_crc_memcpy_prep_one_tx_desc(struct al_udma_q *tx_udma_q,
+	uint32_t flags, uint32_t meta,
+	struct al_buf *buf, uint16_t vmid)
+{
+	uint64_t vmid_shifted = ((uint64_t)vmid) << AL_UDMA_DESC_VMID_SHIFT;
+	uint32_t flags_len = flags;
+	union al_udma_desc *tx_desc;
+	uint32_t ring_id;
+
+	tx_desc = al_udma_desc_get(tx_udma_q);
+	/* get ring id */
+	ring_id = al_udma_ring_id_get(tx_udma_q)
+		<< AL_M2S_DESC_RING_ID_SHIFT;
+
+	flags_len |= ring_id;
+
+	flags_len |= buf->len & AL_M2S_DESC_LEN_MASK;
+	tx_desc->tx.len_ctrl = swap32_to_le(flags_len);
+	tx_desc->tx.meta_ctrl = swap32_to_le(meta);
+	tx_desc->tx.buf_ptr = swap64_to_le(buf->addr | vmid_shifted);
+	al_print_desc(tx_desc);
+}
+
+/**
+ * Get number of rx submission descriptors needed for crc transaction
+ *
+ * we need rx descriptor for each destination buffer.
+ * if the transaction doesn't have destination buffers, then one
+ * descriptor is needed
+ *
+ * @param xaction transaction context
+ *
+ * @return number of rx submission descriptors
+ */
+static INLINE
+uint32_t _al_crcsum_xaction_rx_descs_count(struct al_crc_transaction *xaction)
+{
+	uint32_t count = xaction->dst.num + (xaction->crc_out.len ? 1 : 0);
+
+	/* valid crc rx descs count */
+	al_assert(count <= AL_SSM_MAX_SRC_DESCS);
+
+	return count;
+}
+
+/**
+ * Get number of tx submission descriptors needed for crc transaction
+ *
+ * we need tx descriptor for each source buffer.
+ *
+ * @param xaction transaction context
+ *
+ * @return number of tx submission descriptors
+ */
+static INLINE
+uint32_t _al_crcsum_xaction_tx_descs_count(struct al_crc_transaction *xaction)
+{
+	uint32_t count = xaction->src.num + (xaction->crc_iv_in.len ? 1 : 0) +
+		(xaction->crc_expected.len ? 1 : 0);
+
+	/* valid crc tx descs count */
+	al_assert(count);
+	/* Need one for metadata if offsets are valid */
+	count += (xaction->xor_valid) ? 1 : 0;
+	/* valid crc tx descs count */
+	al_assert(count <= AL_SSM_MAX_SRC_DESCS);
+
+	return count;
+}
+
+/**
+ * Fill the memcpy rx submission descriptors
+ *
+ * this function writes the contents of the rx submission descriptors
+ *
+ * @param rx_udma_q rx udma handle
+ * @param xaction transaction context
+ * @param rx_desc_cnt number of total rx descriptors
+ */
+static INLINE
+void al_crc_memcpy_set_memcpy_rx_descs(struct al_udma_q *rx_udma_q,
+	struct al_memcpy_transaction *xaction, uint32_t rx_desc_cnt)
+{
+	uint32_t flags = 0;
+	union al_udma_desc *rx_desc;
+	uint32_t buf_idx;
+
+	/* Set descriptor flags */
+	flags |= (xaction->flags & AL_SSM_INTERRUPT) ? AL_M2S_DESC_INT_EN : 0;
+	flags |= (xaction->flags & AL_SSM_DEST_NO_SNOOP) ?
+		AL_M2S_DESC_NO_SNOOP_H : 0;
+
+	/* if the xaction doesn't have destination buffers,
+	 * allocate single Meta descriptor
+	 */
+	if (unlikely(!rx_desc_cnt)) {
+		al_debug("crc_memcpy: Preparing Memcpy Meta Rx desc\n");
+		rx_desc = al_udma_desc_get(rx_udma_q);
+		flags |= al_udma_ring_id_get(rx_udma_q)
+			<< AL_M2S_DESC_RING_ID_SHIFT;
+		flags |= RX_DESC_META;
+		/* write back flags */
+		rx_desc->rx.len_ctrl = swap32_to_le(flags);
+		al_print_desc(rx_desc);
+		return;
+	}
+
+	/* dst exist -> will copy the buf to the destination */
+	if (xaction->dst.num) {
+		struct al_buf *buf = xaction->dst.bufs;
+		al_debug("crc_memcpy: Preparing %d Memcpy DST Rx desc\n",
+			xaction->dst.num);
+		for (buf_idx = 0; buf_idx < xaction->dst.num; buf_idx++) {
+			al_crc_memcpy_prep_one_rx_desc(
+				rx_udma_q, flags, buf, xaction->dst.vmid);
+			buf++;
+		}
+	}
+}
+
+/**
+ * Fill the crc/checksum rx submission descriptors
+ *
+ * this function writes the contents of the rx submission descriptors
+ *
+ * @param rx_udma_q rx udma handle
+ * @param xaction transaction context
+ * @param rx_desc_cnt number of total rx descriptors
+ */
+static INLINE
+void al_crc_memcpy_set_crc_rx_descs(struct al_udma_q *rx_udma_q,
+	struct al_crc_transaction *xaction, uint32_t rx_desc_cnt)
+{
+	uint32_t flags = 0;
+	union al_udma_desc *rx_desc;
+	uint32_t buf_idx;
+
+	/* Set descriptor flags */
+	flags = (xaction->flags & AL_SSM_INTERRUPT) ? AL_M2S_DESC_INT_EN : 0;
+	flags |= (xaction->flags & AL_SSM_DEST_NO_SNOOP) ?
+		AL_M2S_DESC_NO_SNOOP_H : 0;
+
+	/* if the xaction doesn't have destination buffers,
+	 * allocate single Meta descriptor
+	 */
+	if (unlikely(!rx_desc_cnt)) {
+		al_debug("crc_memcpy: Preparing CRC Meta Rx desc\n");
+		rx_desc = al_udma_desc_get(rx_udma_q);
+		flags |= al_udma_ring_id_get(rx_udma_q)
+			<< AL_M2S_DESC_RING_ID_SHIFT;
+		flags |= RX_DESC_META;
+		/* write back flags */
+		rx_desc->rx.len_ctrl = swap32_to_le(flags);
+		al_print_desc(rx_desc);
+		return;
+	}
+
+	/* dst exist -> will copy the buf to the destination */
+	if (xaction->dst.num) {
+		struct al_buf *buf = xaction->dst.bufs;
+		al_debug("crc_memcpy: Preparing %d CRC DST Rx desc\n",
+			xaction->dst.num);
+		for (buf_idx = 0; buf_idx < xaction->dst.num; buf_idx++) {
+			al_crc_memcpy_prep_one_rx_desc(
+				rx_udma_q, flags, buf, xaction->dst.vmid);
+			buf++;
+		}
+	}
+
+	/* crc/checksum output */
+	if (xaction->crc_out.len) {
+		al_debug("crc_memcpy: Preparing CRC out Rx desc\n");
+		al_crc_memcpy_prep_one_rx_desc(rx_udma_q, flags,
+			&xaction->crc_out, xaction->misc_vmid);
+	}
+}
+
+/**
+ * Fill the memcpy tx submission descriptors
+ *
+ * this function writes the contents of the tx submission descriptors
+ *
+ * @param tx_udma_q tx udma handle
+ * @param xaction transaction context
+ */
+static INLINE
+void al_crc_memcpy_set_memcpy_tx_descs(struct al_udma_q *tx_udma_q,
+	struct al_memcpy_transaction *xaction)
+{
+	struct al_buf *buf = xaction->src.bufs;
+	uint32_t flags = 0;
+	uint32_t buf_idx;
+	uint32_t word1_meta = 0;
+
+	/* Set flags */
+	flags |= AL_M2S_DESC_FIRST;
+	flags |= xaction->flags & AL_SSM_SRC_NO_SNOOP ?
+		AL_M2S_DESC_NO_SNOOP_H : 0;
+
+	/* Set first desc word1 metadata */
+	word1_meta |= AL_CRC_CHECKSUM << TX_DESC_META_OP_SHIFT;
+	word1_meta |= AL_CRC_CHECKSUM_NULL << TX_DESC_META_CRC_OP_TYPE_SHIFT;
+	word1_meta |= TX_DESC_META_CRC_SEND_ORIG;
+	word1_meta |= RX_DESC_META_CRC_FIRST_BUF;
+	word1_meta |= RX_DESC_META_CRC_LAST_BUF;
+
+	flags |= xaction->flags & AL_SSM_BARRIER ?
+		AL_M2S_DESC_DMB : 0;
+
+	al_debug("crc_memcpy: Preparing %d Memcpy SRC Tx desc\n",
+		xaction->src.num);
+
+	for (buf_idx = 0; buf_idx < xaction->src.num; buf_idx++) {
+		/* check for last */
+		if (buf_idx == (xaction->src.num - 1))
+			flags |= AL_M2S_DESC_LAST;
+
+		al_crc_memcpy_prep_one_tx_desc(
+			tx_udma_q, flags, word1_meta, buf, xaction->src.vmid);
+		word1_meta = 0;
+		/* clear first and DMB flags, keep no snoop hint flag */
+		flags &= AL_M2S_DESC_NO_SNOOP_H;
+		flags |= AL_M2S_DESC_CONCAT;
+		buf++;
+	}
+}
+
+/**
+ * Fill the crc/checksum tx submission descriptors
+ *
+ * this function writes the contents of the tx submission descriptors
+ *
+ * @param tx_udma_q tx udma handle
+ * @param xaction transaction context
+ */
+static INLINE
+void al_crc_memcpy_set_crc_tx_descs(struct al_udma_q *tx_udma_q,
+	struct al_crc_transaction *xaction)
+{
+	struct al_buf *buf = xaction->src.bufs;
+	uint32_t flags = 0;
+	uint32_t buf_idx;
+	uint32_t word1_meta;
+
+	/* Set flags */
+	flags = AL_M2S_DESC_FIRST;
+	flags |= xaction->flags & AL_SSM_SRC_NO_SNOOP ?
+		AL_M2S_DESC_NO_SNOOP_H : 0;
+
+	/* Set first desc word1 metadata */
+	word1_meta = AL_CRC_CHECKSUM << TX_DESC_META_OP_SHIFT;
+	word1_meta |= xaction->crcsum_type << TX_DESC_META_CRC_OP_TYPE_SHIFT;
+	word1_meta |= xaction->dst.num ? TX_DESC_META_CRC_SEND_ORIG : 0;
+	word1_meta |= xaction->save_crc_iv ? TX_DESC_META_CRC_ST_CRC_IV : 0;
+	word1_meta |= xaction->st_crc_out ? TX_DESC_META_CRC_SAVE_IV : 0;
+	word1_meta |= xaction->crc_out.len ? TX_DESC_META_CRC_SEND_CRC : 0;
+	word1_meta |= xaction->crc_iv_in.len ? 0 : TX_DESC_META_CRC_USE_ST_IV;
+	word1_meta |= xaction->crc_expected.len ? TX_DESC_META_CRC_VALID : 0;
+	word1_meta |= (xaction->swap_flags << TX_DESC_META_CRC_SWAP_SHIFT)
+		& TX_DESC_META_CRC_SWAP_MASK;
+	word1_meta |= (xaction->cached_crc_indx << TX_DESC_META_CRC_IDX_SHIFT)
+		& TX_DESC_META_CRC_IDX_MASK;
+
+	/* if xor fields are valid first desc is metadata */
+	if (unlikely(xaction->xor_valid)) {
+		uint32_t flags_len = flags;
+		union al_udma_desc *tx_desc;
+		uint32_t ring_id;
+
+		al_debug("crc_memcpy: preparing CRC metadata desc\n");
+		tx_desc = al_udma_desc_get(tx_udma_q);
+		/* UDMA fields */
+		ring_id = al_udma_ring_id_get(tx_udma_q)
+			<< AL_M2S_DESC_RING_ID_SHIFT;
+		flags_len |= ring_id;
+		flags_len |= AL_M2S_DESC_META_DATA;
+		tx_desc->tx_meta.len_ctrl = swap32_to_le(flags_len);
+		/* Word1 metadata */
+		tx_desc->tx_meta.meta_ctrl = 0;
+		/* Word 2 metadata */
+		tx_desc->tx_meta.meta1 = swap32_to_le(xaction->in_xor);
+		/* Word 3 metadata */
+		tx_desc->tx_meta.meta2 = swap32_to_le(xaction->res_xor);
+		al_print_desc(tx_desc);
+		/* clear first flag, keep no snoop hint flag */
+		flags &= AL_M2S_DESC_NO_SNOOP_H;
+	}
+
+	flags |= xaction->flags & AL_SSM_BARRIER ? AL_M2S_DESC_DMB : 0;
+
+	word1_meta |= RX_DESC_META_CRC_FIRST_BUF;
+
+	/* CRC IV in */
+	if (xaction->crc_iv_in.len) {
+		al_debug("CRC_memcpy: Preparing CRC IV in Tx desc\n");
+		/* check for last */
+		flags |= xaction->src.num ? 0 : AL_M2S_DESC_LAST;
+
+		word1_meta |= xaction->src.num ? 0 : RX_DESC_META_CRC_LAST_BUF;
+
+		al_crc_memcpy_prep_one_tx_desc(tx_udma_q, flags, word1_meta,
+			&xaction->crc_iv_in, xaction->misc_vmid);
+		word1_meta = 0;
+		/* clear first and DMB flags, keep no snoop hint flag */
+		flags &= AL_M2S_DESC_NO_SNOOP_H;
+	}
+
+	/* CRC IV expected */
+	if (xaction->crc_expected.len) {
+		al_debug("CRC_memcpy: Preparing CRC expected Tx desc\n");
+
+		al_crc_memcpy_prep_one_tx_desc(tx_udma_q, flags, word1_meta,
+			&xaction->crc_expected, xaction->misc_vmid);
+		word1_meta = 0;
+		/* clear first and DMB flags, keep no snoop hint flag */
+		flags &= AL_M2S_DESC_NO_SNOOP_H;
+	}
+
+	al_debug("CRC_memcpy: Preparing %d CRC SRC Tx desc\n",
+		xaction->src.num);
+
+	/* Indicate Last Block */
+	word1_meta |= RX_DESC_META_CRC_LAST_BUF;
+	for (buf_idx = 0; buf_idx < xaction->src.num; buf_idx++) {
+		/* check for last */
+		if (buf_idx == (xaction->src.num - 1))
+			flags |= AL_M2S_DESC_LAST;
+
+		al_crc_memcpy_prep_one_tx_desc(
+			tx_udma_q, flags, word1_meta, buf, xaction->src.vmid);
+		word1_meta = 0;
+		/* clear first and DMB flags, keep no snoop hint flag */
+		flags &= AL_M2S_DESC_NO_SNOOP_H;
+		flags |= AL_M2S_DESC_CONCAT;
+		buf++;
+	}
+}
+
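+/*
+ * Submission is two-phase: the prepare routines below fill both
+ * rings and kick the rx (S2M) ring immediately, while the tx (M2S)
+ * doorbell is rung later through al_crc_memcpy_dma_action().
+ */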
0 : RX_DESC_META_CRC_LAST_BUF; + + al_crc_memcpy_prep_one_tx_desc(tx_udma_q, flags, word1_meta, + &xaction->crc_iv_in, xaction->misc_vmid); + word1_meta = 0; + /* clear first and DMB flags, keep no snoop hint flag */ + flags &= AL_M2S_DESC_NO_SNOOP_H; + + } + + /* CRC IV expected */ + if (xaction->crc_expected.len) { + al_debug("CRC_memcpy: Preparing CRC expected Tx desc\n"); + + al_crc_memcpy_prep_one_tx_desc(tx_udma_q, flags, word1_meta, + &xaction->crc_expected, xaction->misc_vmid); + word1_meta = 0; + /* clear first and DMB flags, keep no snoop hint flag */ + flags &= AL_M2S_DESC_NO_SNOOP_H; + + } + + al_debug("CRC_memcpy: Preparing %d CRC SRC Tx desc\n", + xaction->src.num); + + /* Indicate Last Block */ + word1_meta |= RX_DESC_META_CRC_LAST_BUF; + for (buf_idx = 0; buf_idx < xaction->src.num; buf_idx++) { + /* check for last */ + if (buf_idx == (xaction->src.num - 1)) + flags |= AL_M2S_DESC_LAST; + + al_crc_memcpy_prep_one_tx_desc( + tx_udma_q, flags, word1_meta, buf, xaction->src.vmid); + word1_meta = 0; + /* clear first and DMB flags, keep no snoop hint flag */ + flags &= AL_M2S_DESC_NO_SNOOP_H; + flags |= AL_M2S_DESC_CONCAT; + buf++; + } + +} + +int al_memcpy_prepare(struct al_ssm_dma *dma, uint32_t qid, + struct al_memcpy_transaction *xaction) +{ + uint32_t rx_descs = xaction->dst.num; + uint32_t tx_descs = xaction->src.num; + struct al_udma_q *rx_udma_q; + struct al_udma_q *tx_udma_q; + int rc; + + al_debug("%s\n", __func__); + + /* Queue is for memcpy transactions */ + al_assert(dma->q_types[qid] == AL_MEM_CRC_MEMCPY_Q); + + /* ensure enough rx/tx udma descs */ + rc = al_udma_q_handle_get(&dma->m2m_udma.rx_udma, qid, &rx_udma_q); + al_assert(!rc); + if (unlikely(al_udma_available_get(rx_udma_q) < + (rx_descs ? rx_descs : 1))) { + al_dbg("memcpy[%s]:rx q has no enough free desc", + dma->m2m_udma.name); + return -ENOSPC; + } + + rc = al_udma_q_handle_get(&dma->m2m_udma.tx_udma, qid, &tx_udma_q); + al_assert(!rc); + if (unlikely(al_udma_available_get(tx_udma_q) < tx_descs + + AL_CRC_MEMCPY_DESC_RES)) { + al_dbg("memcpy[%s]:tx q has no enough free desc", + dma->m2m_udma.name); + return -ENOSPC; + } + + /* prepare memcpy rx/tx descs */ + al_crc_memcpy_set_memcpy_rx_descs(rx_udma_q, xaction, rx_descs); + al_crc_memcpy_set_memcpy_tx_descs(tx_udma_q, xaction); + /* add rx descriptors */ + al_udma_desc_action_add(rx_udma_q, rx_descs ? 
rx_descs : 1); + + /* set number of tx descriptors */ + xaction->tx_descs_count = tx_descs; + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +int al_crc_csum_prepare(struct al_ssm_dma *dma, uint32_t qid, + struct al_crc_transaction *xaction) +{ + uint32_t rx_descs; + uint32_t tx_descs; + struct al_udma_q *rx_udma_q; + struct al_udma_q *tx_udma_q; + int rc; + + al_debug("al_crc_csum\n"); + al_print_crc_xaction(xaction); + + /* Queue is for crc/csum transactions */ + al_assert(dma->q_types[qid] == AL_MEM_CRC_MEMCPY_Q); + + /* Save and store together */ + al_assert(((xaction->save_crc_iv == 0) || (xaction->st_crc_out == 0))); + + /* calc tx (M2S) descriptors */ + tx_descs = _al_crcsum_xaction_tx_descs_count(xaction); + rc = al_udma_q_handle_get(&dma->m2m_udma.tx_udma, qid, &tx_udma_q); + /* valid CRC/CSUM tx q handle */ + al_assert(!rc); + if (unlikely(al_udma_available_get(tx_udma_q) < tx_descs + + AL_CRC_MEMCPY_DESC_RES)) { + al_dbg("crc_csum[%s]:tx q has no enough free desc", + dma->m2m_udma.name); + return -ENOSPC; + } + + /* calc rx (S2M) descriptors, at least one desc is required */ + rx_descs = _al_crcsum_xaction_rx_descs_count(xaction); + rc = al_udma_q_handle_get(&dma->m2m_udma.rx_udma, qid, &rx_udma_q); + /* valid CRC/CSUM rx q handle */ + al_assert(!rc); + if (unlikely(al_udma_available_get(rx_udma_q) < + (rx_descs ? rx_descs : 1))) { + al_dbg("crc_csum[%s]:rx q has no enough free desc", + dma->m2m_udma.name); + return -ENOSPC; + } + + /* prepare crc/checksum rx descs */ + al_crc_memcpy_set_crc_rx_descs(rx_udma_q, xaction, rx_descs); + /* prepare crc/checksum tx descs */ + al_crc_memcpy_set_crc_tx_descs(tx_udma_q, xaction); + /* add rx descriptors */ + al_udma_desc_action_add(rx_udma_q, rx_descs ? rx_descs : 1); + + /* set number of tx descriptors */ + xaction->tx_descs_count = tx_descs; + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +int al_crc_memcpy_dma_action(struct al_ssm_dma *dma, uint32_t qid, + int tx_descs) +{ + struct al_udma_q *tx_udma_q; + int rc; + + rc = al_udma_q_handle_get(&dma->m2m_udma.tx_udma, qid, &tx_udma_q); + /* valid CRC/CSUM tx q handle */ + al_assert(!rc); + + /* add tx descriptors */ + al_udma_desc_action_add(tx_udma_q, tx_descs); + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +int al_crc_memcpy_dma_completion(struct al_ssm_dma *dma, uint32_t qid, + uint32_t *comp_status) +{ + struct al_udma_q *rx_udma_q; + struct al_udma_q *tx_udma_q; + volatile union al_udma_cdesc *cdesc; + int rc; + uint32_t cdesc_count; + + rc = al_udma_q_handle_get(&dma->m2m_udma.rx_udma, qid, &rx_udma_q); + /* valid comp rx q handle */ + al_assert(!rc); + + cdesc_count = al_udma_cdesc_packet_get(rx_udma_q, &cdesc); + if (!cdesc_count) + return 0; + + /* if we have multiple completion descriptors, + then last one will have the valid status */ + if (unlikely(cdesc_count > 1)) + cdesc = al_cdesc_next(rx_udma_q, cdesc, cdesc_count - 1); + + *comp_status = swap32_from_le(cdesc->al_desc_comp_rx.ctrl_meta) & + RX_COMP_STATUS_MASK; + + al_udma_cdesc_ack(rx_udma_q, cdesc_count); + + al_debug("crc_memcpy packet completed. 
count %d status desc %p meta %x\n", + cdesc_count, cdesc, cdesc->al_desc_comp_rx.ctrl_meta); + + /* cleanup tx completion queue */ + rc = al_udma_q_handle_get(&dma->m2m_udma.tx_udma, qid, &tx_udma_q); + /* valid comp tx q handle */ + al_assert(!rc); + + cdesc_count = al_udma_cdesc_get_all(tx_udma_q, NULL); + if (cdesc_count) + al_udma_cdesc_ack(tx_udma_q, cdesc_count); + + return 1; +} diff --git a/target/linux/alpine/files/drivers/crypto/al/al_hal_ssm_crypto.c b/target/linux/alpine/files/drivers/crypto/al/al_hal_ssm_crypto.c new file mode 100644 index 00000000000000..d2bbd03c555508 --- /dev/null +++ b/target/linux/alpine/files/drivers/crypto/al/al_hal_ssm_crypto.c @@ -0,0 +1,950 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include + +/* + * Rx (S2M) Descriptors + */ +#define RX_DESC_META (1<<30) /* Meta data */ + +/* Tx (M2S) word1 common Descriptors */ +#define TX_DESC_META_OP_MASK (0x3<<23) +#define TX_DESC_META_OP_SHIFT (23) + +/* + * Crypto + */ +#define TX_DESC_META_CRYPT_DIR_SHIFT (22) /* Direction */ +#define TX_DESC_META_CRYPT_S_SA (1<<21) /* Evict SA */ +#define TX_DESC_META_CRYPT_S_ENCIV (1<<20) /* Send IV */ +#define TX_DESC_META_CRYPT_SEND_ORIG (1<<19) /* Send original packet */ +#define TX_DESC_META_CRYPT_SEND_AUTHIV (1<<18) /* Send Authentication IV */ +#define TX_DESC_META_CRYPT_S_SIGN (1<<17) /* Send Sign */ + +#define TX_DESC_META_AUTH_FIRST (1<<16) /* Auth only first bit */ +#define TX_DESC_META_AUTH_LAST (1<<15) /* Auth only last bit */ + +#define TX_DESC_META_AUTH_VALID (1<<14) /* Validate Signature */ + +#define TX_DESC_META_SA_IDX_MASK (0xff<<5) /* SA index mask */ +#define TX_DESC_META_SA_IDX_SHIFT (5) + +#define TX_DESC_META_BUF_TYPE_MASK (0x7)/* Buffer type mask */ +#define TX_DESC_META_BUF_TYPE_SHIFT (0) + +/* Tx (M2S) word2 Descriptors */ +#define TX_DESC_META_ENC_OFF_MASK (0xffff<<16) +#define TX_DESC_META_ENC_OFF_SHIFT (16) +#define TX_DESC_META_ENC_OFF_EOP_MASK (0xffff) +#define TX_DESC_META_ENC_OFF_EOP_SHIFT (0) + +/* Tx (M2S) word3 Descriptors */ +#define TX_DESC_META_AUTH_OFF_MASK (0xffff<<16) +#define TX_DESC_META_AUTH_OFF_SHIFT (16) +#define TX_DESC_META_AUTH_OFF_EOP_MASK (0xffff) +#define TX_DESC_META_AUTH_OFF_EOP_SHIFT (0) + +#define RX_COMP_STATUS_MASK (AL_CRYPT_AUTH_ERROR | \ + AL_CRYPT_SA_IV_EVICT_FIFO_ERROR | \ + AL_CRYPT_DES_ILLEGAL_KEY_ERROR | \ + AL_CRYPT_M2S_ERROR | \ + AL_CRYPT_SRAM_PARITY_ERROR | \ + AL_CRYPT_INTERNAL_FLOW_VIOLATION_ERROR) + +/* + * Crypto DMA operation (Enc, Auth or Enc + Auth) + */ +#define AL_CRYPT_OP 3 + +/** Crypto DMA buffer types */ +enum al_crypto_buf_type { + AL_CRYPT_BUF_SA_UPDATE = 0, + AL_CRYPT_BUF_ENC_IV = 1, + AL_CRYPT_BUF_AUTH_IV = 2, + AL_CRYPT_BUF_SRC = 3, + AL_CRYPT_BUF_AUTH_SIGN = 4 +}; + + +/* + * SA + */ +/* Word 0 */ +#define CRYPT_SAD_OP_MASK (0x3<<30)/* Crypto Operation */ +#define CRYPT_SAD_OP_SHIFT (30) +#define CRYPT_SAD_ENC_TYPE_MASK (0xf<<25)/* Crypto Type */ +#define CRYPT_SAD_ENC_TYPE_SHIFT (25) +#define CRYPT_SAD_TRIPDES_MODE_MASK (0x1<<22)/* 3DES mode */ +#define CRYPT_SAD_TRIPDES_MODE_SHIFT (22) +#define CRYPT_SAD_AES_KEY_SIZE_MASK (0x3<<20)/* AES key size */ +#define CRYPT_SAD_AES_KEY_SIZE_SHIFT (20) +#define CRYPT_SAD_AUTH_TYPE_MASK (0xf<<12)/* Auth type */ +#define CRYPT_SAD_AUTH_TYPE_SHIFT (12) +#define CRYPT_SAD_SIGN_SIZE_MASK (0xf<<8) /* Signature size */ +#define CRYPT_SAD_SIGN_SIZE_SHIFT (8) +#define CRYPT_SAD_SHA2_KEY_SIZE_MASK (0x3<<6) /* Sha2 key size */ +#define CRYPT_SAD_SHA2_KEY_SIZE_SHIFT (6) +#define CRYPT_SAD_HMAC_EN (1<<5) /* Hmac enable */ +#define CRYPT_SAD_SIGN_AFTER_ENC (1<<4) /* Sign after encryption */ +#define CRYPT_SAD_AUTH_AFTER_DEC (1<<3) /* Auth after decryption */ +#define CRYPT_SAD_AUTH_MSB_BITS (1<<2) /* Auth use the more significant + bits of the signature */ +#define CRYPT_SAD_CNTR_SIZE_MASK (0x3) /* Counter size */ +#define CRYPT_SAD_CNTR_SIZE_SHIFT (0) + + +/* Word 1 */ +#define CRYPT_SAD_CCM_CBC_IV_ADD_SWORD (1) +#define CRYPT_SAD_CCM_CBC_IV_ADD_SIZE (1) + +/* Word 2 */ +#define CRYPT_SAD_ENC_OFF_MASK (0xffff<<16)/*Enc off- start of pkt*/ +#define CRYPT_SAD_ENC_OFF_SHIFT (16) +#define CRYPT_SAD_ENC_OFF_EOP_MASK (0xffff)/*Enc off- end 
of pkt*/ +#define CRYPT_SAD_ENC_OFF_EOP_SHIFT (0) + +/* Word 3 */ +#define CRYPT_SAD_AUTH_OFF_MASK (0xffff<<16) /*Auth off- start of pkt*/ +#define CRYPT_SAD_AUTH_OFF_SHIFT (16) +#define CRYPT_SAD_AUTH_OFF_EOP_MASK (0xffff) /*Auth off- end of pkt*/ +#define CRYPT_SAD_AUTH_OFF_EOP_SHIFT (0) + +/* Other words */ +#define CRYPT_SAD_ENC_KEY_SWORD (4) /* Encryption Key */ +#define CRYPT_SAD_ENC_KEY_SIZE (8) +#define CRYPT_SAD_ENC_IV_SWORD (12) /* Encryption IV */ +#define CRYPT_SAD_ENC_IV_SIZE (4) /* Engine update this field */ +#define CRYPT_SAD_GCM_AUTH_IV_SWORD (16) /* GCM Auth IV */ +#define CRYPT_SAD_GCM_AUTH_IV_SIZE (4) +#define CRYPT_SAD_AUTH_IV_SWORD (12) /* Auth Only IV */ +#define CRYPT_SAD_AUTH_IV_SIZE (16) /* Engine update this field */ +#define CRYPT_SAD_HMAC_IV_IN_SWORD (28) /* HMAC_IV_in H(k xor ipad) */ +#define CRYPT_SAD_HMAC_IV_IN_SIZE (16) +#define CRYPT_SAD_HMAC_IV_OUT_SWORD (44) /* HMAC_IV_out H(k xor opad) */ +#define CRYPT_SAD_HMAC_IV_OUT_SIZE (16) + + +#define sa_init_field(dest, val, mask, shift, str)\ + do {\ + al_assert(!((val << shift) & ~(mask)));\ + al_debug(" SA %s - %x\n", str, val); \ + dest |= (val << shift) & mask;\ + } while (0); +/** + * DEBUG + */ +#ifdef CRYPTO_DEBUG +void al_print_crypto_desc(union al_udma_desc *desc) +{ + al_dbg(" Crypto: Desc: %08x %08x %08x %08x\n", + desc->tx_meta.len_ctrl, desc->tx_meta.meta_ctrl, + desc->tx_meta.meta1, desc->tx_meta.meta2); +} + +static +void al_print_crypto_xaction(struct al_crypto_transaction *xaction) +{ + unsigned int i; + + al_dbg("Crypto: Transaction debug\n"); + al_dbg(" Direction %s\n", + (xaction->dir == AL_CRYPT_ENCRYPT) ? "Encrypt" : "Decrypt"); + al_dbg(" Flags %d\n", xaction->flags); + + al_dbg("-SRC buf size %d num of buffers %d\n", + xaction->src_size, xaction->src.num); + for (i = 0 ; i < xaction->src.num; i++) + al_dbg(" addr 0x%016llx len %d\n", + (unsigned long long)xaction->src.bufs[i].addr, + xaction->src.bufs[i].len); + + al_dbg("-DST num of buffers %d\n", + xaction->dst.num); + for (i = 0 ; i < xaction->dst.num; i++) + al_dbg(" addr 0x%016llx len %d\n", + (unsigned long long)xaction->dst.bufs[i].addr, + xaction->dst.bufs[i].len); + + al_dbg("-SA index %d address 0x%016llx len %d\n", + xaction->sa_indx, (unsigned long long)xaction->sa_in.addr, + xaction->sa_in.len); + al_dbg(" SA OUT size: %d , addr 0x%016llx\n", + xaction->sa_out.len, + (unsigned long long)xaction->sa_out.addr); + + al_dbg("-Enc IV IN size: %d, addr 0x%016llx\n", + xaction->enc_iv_in.len, + (unsigned long long)xaction->enc_iv_in.addr); + al_dbg(" Enc IV OUT size: %d, addr 0x%016llx\n", + xaction->enc_iv_out.len, + (unsigned long long)xaction->enc_iv_out.addr); + al_dbg(" Enc Next IV OUT size: %d, addr 0x%016llx\n", + xaction->enc_next_iv_out.len, + (unsigned long long)xaction->enc_next_iv_out.addr); + al_dbg(" Enc Offset %d Len %d\n", + xaction->enc_in_off, xaction->enc_in_len); + + al_dbg("-Auth fl_valid %d, first %d last %d\n", + xaction->auth_fl_valid, xaction->auth_first, + xaction->auth_last); + al_dbg(" Auth IV IN size: %d, addr 0x%016llx\n", + xaction->auth_iv_in.len, + (unsigned long long)xaction->auth_iv_in.addr); + al_dbg(" Auth IV OUT size: %d, addr 0x%016llx\n", + xaction->auth_iv_out.len, + (unsigned long long)xaction->auth_iv_out.addr); + al_dbg(" Auth SIGN IN size: %d, addr 0x%016llx\n", + xaction->auth_sign_in.len, + (unsigned long long)xaction->auth_sign_in.addr); + al_dbg(" Auth SIGN OUT size: %d, addr 0x%016llx\n", + xaction->auth_sign_out.len, + (unsigned long long)xaction->auth_sign_out.addr); + al_dbg(" 
Auth Offset %d Len %d\n", + xaction->auth_in_off, xaction->auth_in_len); + al_dbg(" Auth Byte Count %d\n", + xaction->auth_bcnt); + +} + +#else +#define al_print_crypto_desc(x) +#define al_print_crypto_xaction(x) +#endif + +/** + * Memcpy to HW SA + * + * @param dst destination buffer + * @param src source buffer + * @param size size in words + */ +static +void al_crypto_sa_copy(uint32_t *dst, uint8_t *src, uint32_t size) +{ + uint32_t i; + uint8_t *cdst = (uint8_t *)dst; + for (i = 0; i < size*4; i++) + cdst[i] = src[i]; +} + +/** + * Get number of rx submission descriptors needed for crypto transaction + * + * we need rx descriptor for each destination buffer. + * if the transaction doesn't have destination buffers, then one + * descriptor is needed + * + * @param xaction transaction context + * + * @return number of rx submission descriptors + */ +static INLINE +uint32_t al_crypto_xaction_rx_descs_count(struct al_crypto_transaction *xaction) +{ + uint32_t count = xaction->dst.num + (xaction->sa_out.len ? 1 : 0) + + (xaction->enc_iv_out.len ? 1 : 0) + + ((xaction->enc_next_iv_out.len || + xaction->auth_iv_out.len) ? 1 : 0) + + (xaction->auth_sign_out.len ? 1 : 0); + + /* valid rx descs count */ + al_assert(count <= AL_SSM_MAX_SRC_DESCS); + + return count; +} + +/** + * Get number of tx submission descriptors needed for crypto transaction + * + * we need tx descriptor for each source buffer. + * + * @param xaction transaction context + * + * @return number of tx submission descriptors + */ +static INLINE +uint32_t al_crypto_xaction_tx_descs_count(struct al_crypto_transaction *xaction) +{ + uint32_t count = xaction->src.num + (xaction->sa_in.len ? 1 : 0) + + (xaction->enc_iv_in.len ? 1 : 0) + + (xaction->auth_iv_in.len ? 1 : 0) + + (xaction->auth_sign_in.len ? 1 : 0); + + /* valid tx descs count */ + al_assert(count); + /* Need one for metadata if offsets are valid */ + count += (xaction->enc_in_len || xaction->auth_in_len) ? 1 : 0; + /*valid tx descs count*/ + al_assert(count <= AL_SSM_MAX_SRC_DESCS); + + return count; +} + +/** + * Fill one rx submission descriptor + * + * @param rx_udma_q rx udma handle + * @param flags flags for the descriptor + * @param buf destination buffer + * @param vmid virtual machine ID + */ +static INLINE +void al_crypto_prep_one_rx_desc(struct al_udma_q *rx_udma_q, + uint32_t flags, struct al_buf *buf, uint16_t vmid) +{ + uint64_t vmid_shifted = ((uint64_t)vmid) << AL_UDMA_DESC_VMID_SHIFT; + uint32_t flags_len = flags; + union al_udma_desc *rx_desc; + uint32_t ring_id; + + rx_desc = al_udma_desc_get(rx_udma_q); + /* get ring id */ + ring_id = al_udma_ring_id_get(rx_udma_q) + << AL_M2S_DESC_RING_ID_SHIFT; + + flags_len |= ring_id; + + flags_len |= buf->len & AL_M2S_DESC_LEN_MASK; + rx_desc->rx.len_ctrl = swap32_to_le(flags_len); + rx_desc->rx.buf1_ptr = swap64_to_le(buf->addr | vmid_shifted); + al_print_crypto_desc(rx_desc); +} + +/** + * Fill the crypto rx submission descriptors + * + * this function writes the contents of the rx submission descriptors + * + * @param rx_udma_q rx udma handle + * @param xaction transaction context + * @param rx_desc_cnt number of total rx descriptors + */ +static +void al_crypto_set_rx_descs(struct al_udma_q *rx_udma_q, + struct al_crypto_transaction *xaction, uint32_t rx_desc_cnt) +{ + uint32_t flags; + union al_udma_desc *rx_desc; + uint32_t buf_idx; + + /* Set descriptor flags */ + flags = (xaction->flags & AL_SSM_INTERRUPT) ? + AL_M2S_DESC_INT_EN : 0; + flags |= (xaction->flags & AL_SSM_DEST_NO_SNOOP) ? 
+ AL_M2S_DESC_NO_SNOOP_H : 0;
+
+ /* if the xaction doesn't have destination buffers,
+ * allocate a single Meta descriptor
+ */
+ if (unlikely(!rx_desc_cnt)) {
+ al_debug("Crypto: Preparing Meta Rx desc\n");
+ rx_desc = al_udma_desc_get(rx_udma_q);
+ flags |= al_udma_ring_id_get(rx_udma_q)
+ << AL_M2S_DESC_RING_ID_SHIFT;
+ flags |= RX_DESC_META;
+ /* write back flags */
+ rx_desc->rx.len_ctrl = swap32_to_le(flags);
+ al_print_crypto_desc(rx_desc);
+ return;
+ }
+
+ /* prepare descriptors for the required fields */
+ if (unlikely(xaction->sa_out.len)) {
+ al_debug("Crypto: Preparing SA out Rx desc\n");
+ al_crypto_prep_one_rx_desc(
+ rx_udma_q, flags, &xaction->sa_out, xaction->misc_vmid);
+ }
+
+ if (unlikely(xaction->enc_iv_out.len)) {
+ al_debug("Crypto: Preparing ENC IV out Rx desc\n");
+ al_crypto_prep_one_rx_desc(rx_udma_q, flags,
+ &xaction->enc_iv_out, xaction->misc_vmid);
+ }
+
+ if (xaction->dst.num) {
+ struct al_buf *buf = xaction->dst.bufs;
+ al_debug("Crypto: Preparing %d Crypto DST Rx desc\n",
+ xaction->dst.num);
+ for (buf_idx = 0; buf_idx < xaction->dst.num; buf_idx++) {
+ al_crypto_prep_one_rx_desc(
+ rx_udma_q, flags, buf, xaction->dst.vmid);
+ buf++;
+ }
+ }
+
+ /*
+ * IV output: the next encryption IV to use, or, for an auth-only SA
+ * where auth_last isn't set, the intermediate auth output.
+ */
+ if (xaction->enc_next_iv_out.len) {
+ al_debug("Crypto: Preparing ENC Next IV OUT Rx desc\n");
+ al_crypto_prep_one_rx_desc(rx_udma_q, flags,
+ &xaction->enc_next_iv_out, xaction->misc_vmid);
+ } else {
+ if (xaction->auth_iv_out.len) {
+ al_debug("Crypto: Preparing AUTH IV OUT Rx desc\n");
+ al_crypto_prep_one_rx_desc(rx_udma_q, flags,
+ &xaction->auth_iv_out, xaction->misc_vmid);
+ }
+ }
+
+ if (xaction->auth_sign_out.len) {
+ al_debug("Crypto: Preparing SIGN out Rx desc\n");
+ al_crypto_prep_one_rx_desc(rx_udma_q, flags,
+ &xaction->auth_sign_out, xaction->misc_vmid);
+ }
+
+}
+
+
+/**
+ * Fill one tx submission descriptor
+ *
+ * @param tx_udma_q tx udma handle
+ * @param flags flags for the descriptor
+ * @param meta metadata word1
+ * @param buf source buffer
+ * @param vmid virtual machine ID
+ */
+static INLINE void al_crypto_prep_one_tx_desc(struct al_udma_q *tx_udma_q,
+ uint32_t flags, uint32_t meta, struct al_buf *buf,
+ uint16_t vmid)
+{
+ uint64_t vmid_shifted = ((uint64_t)vmid) << AL_UDMA_DESC_VMID_SHIFT;
+ uint32_t flags_len = flags;
+ union al_udma_desc *tx_desc;
+ uint32_t ring_id;
+
+ tx_desc = al_udma_desc_get(tx_udma_q);
+ /* get ring id */
+ ring_id = al_udma_ring_id_get(tx_udma_q)
+ << AL_M2S_DESC_RING_ID_SHIFT;
+
+ flags_len |= ring_id;
+
+ flags_len |= buf->len & AL_M2S_DESC_LEN_MASK;
+ tx_desc->tx.len_ctrl = swap32_to_le(flags_len);
+ tx_desc->tx.meta_ctrl = swap32_to_le(meta);
+ tx_desc->tx.buf_ptr = swap64_to_le(buf->addr | vmid_shifted);
+ al_print_crypto_desc(tx_desc);
+}
+
+/**
+ * Fill the crypto tx submission descriptors
+ *
+ * this function writes the contents of the tx submission descriptors
+ *
+ * @param tx_udma_q tx udma handle
+ * @param xaction transaction context
+ * @param tx_desc_cnt number of total tx descriptors
+ */
+static
+void al_crypto_set_tx_descs(struct al_udma_q *tx_udma_q,
+ struct al_crypto_transaction *xaction, uint32_t tx_desc_cnt)
+{
+ uint32_t flags;
+ uint32_t buf_idx;
+ uint32_t word1_meta;
+ uint32_t desc_cnt = tx_desc_cnt;
+
+ /* Set flags */
+ flags = AL_M2S_DESC_FIRST;
+ flags |= unlikely(xaction->flags & AL_SSM_SRC_NO_SNOOP) ?
+ AL_M2S_DESC_NO_SNOOP_H : 0;
+
+ /* Set first desc word1 metadata */
+ word1_meta = AL_CRYPT_OP << TX_DESC_META_OP_SHIFT;
+ word1_meta |= xaction->dir << TX_DESC_META_CRYPT_DIR_SHIFT;
+ word1_meta |= unlikely(xaction->sa_out.len) ?
+ TX_DESC_META_CRYPT_S_SA : 0;
+ word1_meta |= unlikely(xaction->enc_iv_out.len) ?
+ TX_DESC_META_CRYPT_S_ENCIV : 0;
+
+ word1_meta |= unlikely(xaction->dst.num) ?
+ TX_DESC_META_CRYPT_SEND_ORIG : 0;
+
+ word1_meta |=
+ unlikely(xaction->enc_next_iv_out.len ||
+ xaction->auth_iv_out.len) ?
+ TX_DESC_META_CRYPT_SEND_AUTHIV : 0;
+
+ word1_meta |= likely(xaction->auth_sign_out.len) ?
+ TX_DESC_META_CRYPT_S_SIGN : 0;
+
+ if (unlikely(xaction->auth_fl_valid)) {
+ word1_meta |= xaction->auth_first ? TX_DESC_META_AUTH_FIRST : 0;
+ word1_meta |= xaction->auth_last ? TX_DESC_META_AUTH_LAST : 0;
+ } else {
+ word1_meta |= TX_DESC_META_AUTH_FIRST | TX_DESC_META_AUTH_LAST;
+ }
+
+ word1_meta |= unlikely(xaction->auth_sign_in.len) ?
+ TX_DESC_META_AUTH_VALID : 0;
+
+ word1_meta |= (xaction->sa_indx << TX_DESC_META_SA_IDX_SHIFT)
+ & TX_DESC_META_SA_IDX_MASK;
+
+ /* First Meta data desc */
+ if ((xaction->enc_in_len) || (xaction->auth_in_len)) {
+ uint32_t flags_len = flags;
+ union al_udma_desc *tx_desc;
+ uint32_t ring_id;
+ uint32_t enc_meta;
+ uint32_t auth_meta;
+
+ al_debug("Crypto: preparing metadata desc: enc_in_len %d "
+ "auth_in_len %d\n",
+ xaction->enc_in_len, xaction->auth_in_len);
+ al_debug(" metadata desc: enc_in_off %d "
+ "auth_in_off %d\n",
+ xaction->enc_in_off, xaction->auth_in_off);
+ /* having only a metadata desc isn't valid */
+ desc_cnt--;
+ /* Valid desc count */
+ al_assert(desc_cnt);
+
+ tx_desc = al_udma_desc_get(tx_udma_q);
+ /* UDMA fields */
+ ring_id = al_udma_ring_id_get(tx_udma_q)
+ << AL_M2S_DESC_RING_ID_SHIFT;
+ flags_len |= ring_id;
+ flags_len |= AL_M2S_DESC_META_DATA;
+ tx_desc->tx_meta.len_ctrl = swap32_to_le(flags_len);
+ /* Word1 metadata */
+ tx_desc->tx_meta.meta_ctrl = 0;
+ if (xaction->auth_bcnt) {
+ /* Auth only, prev auth byte count */
+ tx_desc->tx_meta.meta1 =
+ swap32_to_le(xaction->auth_bcnt);
+ } else {
+ /* Encryption offsets */
+ enc_meta = (xaction->src_size -
+ (xaction->enc_in_len + xaction->enc_in_off))
+ & TX_DESC_META_ENC_OFF_EOP_MASK;
+ enc_meta |= (xaction->enc_in_off
+ << TX_DESC_META_ENC_OFF_SHIFT)
+ & TX_DESC_META_ENC_OFF_MASK;
+
+ tx_desc->tx_meta.meta1 = swap32_to_le(enc_meta);
+ }
+ /* Authentication offsets */
+ auth_meta = (xaction->src_size -
+ (xaction->auth_in_len + xaction->auth_in_off))
+ & TX_DESC_META_AUTH_OFF_EOP_MASK;
+ auth_meta |= (xaction->auth_in_off
+ << TX_DESC_META_AUTH_OFF_SHIFT)
+ & TX_DESC_META_AUTH_OFF_MASK;
+ tx_desc->tx_meta.meta2 = swap32_to_le(auth_meta);
+ al_print_crypto_desc(tx_desc);
+ /* clear first flag, keep no snoop hint flag */
+ flags &= AL_M2S_DESC_NO_SNOOP_H;
+ }
+
+ flags |= unlikely(xaction->flags & AL_SSM_BARRIER) ?
+ AL_M2S_DESC_DMB : 0;
+
+ /* prepare descriptors for the SA_in if found */
+ if (xaction->sa_in.len) {
+ al_debug("Crypto: Preparing SA Tx desc sa_index %d\n",
+ xaction->sa_indx);
+ /* check for last */
+ flags |= (desc_cnt == 1) ?
AL_M2S_DESC_LAST : 0; + desc_cnt--; + /* update buffer type in metadata */ + word1_meta |= AL_CRYPT_BUF_SA_UPDATE + << TX_DESC_META_BUF_TYPE_SHIFT; + + al_crypto_prep_one_tx_desc(tx_udma_q, flags, word1_meta, + &xaction->sa_in, xaction->misc_vmid); + word1_meta = 0; + /* clear first and DMB flags, keep no snoop hint flag */ + flags &= AL_M2S_DESC_NO_SNOOP_H; + } + + /* prepare descriptors for the enc_IV_in if found */ + if (likely(xaction->enc_iv_in.len)) { + al_debug("Crypto: Preparing IV in Tx desc\n"); + /* check for last */ + flags |= (desc_cnt == 1) ? AL_M2S_DESC_LAST : 0; + desc_cnt--; + /* update buffer type in metadata */ + word1_meta |= AL_CRYPT_BUF_ENC_IV + << TX_DESC_META_BUF_TYPE_SHIFT; + + al_crypto_prep_one_tx_desc(tx_udma_q, flags, word1_meta, + &xaction->enc_iv_in, xaction->misc_vmid); + word1_meta = 0; + /* clear first and DMB flags, keep no snoop hint flag */ + flags &= AL_M2S_DESC_NO_SNOOP_H; + } + + /* prepare descriptors for the auth_IV_in if found */ + if (unlikely(xaction->auth_iv_in.len)) { + al_debug("Crypto: Preparing Auth IV in Tx desc\n"); + /* check for last */ + flags |= (desc_cnt == 1) ? AL_M2S_DESC_LAST : 0; + desc_cnt--; + /* update buffer type in metadata */ + word1_meta |= AL_CRYPT_BUF_AUTH_IV + << TX_DESC_META_BUF_TYPE_SHIFT; + + al_crypto_prep_one_tx_desc(tx_udma_q, flags, word1_meta, + &xaction->auth_iv_in, xaction->misc_vmid); + word1_meta = 0; + /* clear first and DMB flags, keep no snoop hint flag */ + flags &= AL_M2S_DESC_NO_SNOOP_H; + } + + /* prepare descriptors for the source buffer if found */ + if (likely(xaction->src.num)) { + struct al_buf *buf = xaction->src.bufs; + al_debug("Crypto: Preparing SRC %d Tx desc\n", + xaction->src.num); + /* update buffer type in metadata */ + word1_meta |= AL_CRYPT_BUF_SRC << TX_DESC_META_BUF_TYPE_SHIFT; + + for (buf_idx = 0; buf_idx < xaction->src.num; buf_idx++) { + /* check for last */ + flags |= (desc_cnt == 1) ? 
AL_M2S_DESC_LAST : 0; + desc_cnt--; + + al_crypto_prep_one_tx_desc(tx_udma_q, flags, + word1_meta, buf, xaction->src.vmid); + word1_meta = 0; + /* clear first and DMB flags, keep no snoop hint flag */ + flags &= AL_M2S_DESC_NO_SNOOP_H; + flags |= AL_M2S_DESC_CONCAT; + buf++; + } + + /* clear first, concat and DMB flags, keep no snoop hint flag */ + flags &= AL_M2S_DESC_NO_SNOOP_H; + } + + /* prepare descriptors for the auth signature if found */ + if (unlikely(xaction->auth_sign_in.len)) { + al_debug("Crypto: Preparing Signature in Tx desc\n"); + /* if we are here then this is last */ + flags |= AL_M2S_DESC_LAST; + /* update buffer type in metadata */ + word1_meta |= AL_CRYPT_BUF_AUTH_SIGN + << TX_DESC_META_BUF_TYPE_SHIFT; + + al_crypto_prep_one_tx_desc(tx_udma_q, flags, word1_meta, + &xaction->auth_sign_in, xaction->misc_vmid); + } + +} + +/****************************** API functions *********************************/ +int al_crypto_hw_sa_init(struct al_crypto_sa *sa, + struct al_crypto_hw_sa *hw_sa) +{ + uint32_t tword; + + /* Word 0 */ + tword = 0; + /* Valid SA operation */ + al_assert(sa->sa_op != AL_CRYPT_RES); + sa_init_field(tword, sa->sa_op, CRYPT_SAD_OP_MASK, + CRYPT_SAD_OP_SHIFT, "valid sa_op"); + /* Encryption */ + if ((sa->sa_op != AL_CRYPT_AUTH_ONLY) || + (sa->auth_type == AL_CRYPT_AUTH_AES_GCM) || + (sa->auth_type == AL_CRYPT_AUTH_AES_CCM)) { + sa_init_field(tword, sa->enc_type, CRYPT_SAD_ENC_TYPE_MASK, + CRYPT_SAD_ENC_TYPE_SHIFT, "valid enc type"); + if ((sa->enc_type == AL_CRYPT_TRIPDES_ECB) || + (sa->enc_type == AL_CRYPT_TRIPDES_CBC)) { + sa_init_field(tword, + sa->tripdes_m, + CRYPT_SAD_TRIPDES_MODE_MASK, + CRYPT_SAD_TRIPDES_MODE_SHIFT, + "valid 3des mode"); + } + if (sa->enc_type > AL_CRYPT_TRIPDES_CBC) { + sa_init_field(tword, + sa->aes_ksize, + CRYPT_SAD_AES_KEY_SIZE_MASK, + CRYPT_SAD_AES_KEY_SIZE_SHIFT, + "valid aes key size"); + } + sa_init_field(tword, + sa->cntr_size, + CRYPT_SAD_CNTR_SIZE_MASK, + CRYPT_SAD_CNTR_SIZE_SHIFT, + "valid counter loop"); + } + + /* Authentication */ + if (sa->sa_op != AL_CRYPT_ENC_ONLY) { + sa_init_field(tword, + sa->auth_type, + CRYPT_SAD_AUTH_TYPE_MASK, + CRYPT_SAD_AUTH_TYPE_SHIFT, + "valid auth type"); + sa_init_field(tword, + sa->signature_size, + CRYPT_SAD_SIGN_SIZE_MASK, + CRYPT_SAD_SIGN_SIZE_SHIFT, + "valid sign size"); + if (sa->auth_type == AL_CRYPT_AUTH_SHA2) + sa_init_field(tword, + sa->sha2_mode, + CRYPT_SAD_SHA2_KEY_SIZE_MASK, + CRYPT_SAD_SHA2_KEY_SIZE_SHIFT, + "valid sha2 key size"); + tword |= sa->auth_signature_msb ? CRYPT_SAD_AUTH_MSB_BITS : 0; + tword |= sa->auth_hmac_en ? CRYPT_SAD_HMAC_EN : 0; + } + + /* Encryption + Authentication */ + if (sa->sa_op == AL_CRYPT_ENC_AUTH) { + tword |= sa->sign_after_enc ? CRYPT_SAD_SIGN_AFTER_ENC : 0; + tword |= sa->auth_after_dec ? 
CRYPT_SAD_AUTH_AFTER_DEC : 0; + } + + hw_sa->sa_word[0] = swap32_to_le(tword); + + /* Word 2 - Encryption offsets */ + tword = 0; + if (sa->sa_op != AL_CRYPT_AUTH_ONLY) { + sa_init_field(tword, + sa->enc_offset, + CRYPT_SAD_ENC_OFF_MASK, + CRYPT_SAD_ENC_OFF_SHIFT, + "valid enc off"); + sa_init_field(tword, + sa->enc_offset_eop, + CRYPT_SAD_ENC_OFF_EOP_MASK, + CRYPT_SAD_ENC_OFF_EOP_SHIFT, + "valid enc off eop"); + hw_sa->sa_word[2] = swap32_to_le(tword); + } + + /* Word 3 - Authentication offsets */ + tword = 0; + if (sa->sa_op != AL_CRYPT_ENC_ONLY) { + sa_init_field(tword, + sa->auth_offset, + CRYPT_SAD_AUTH_OFF_MASK, + CRYPT_SAD_AUTH_OFF_SHIFT, + "valid auth off"); + sa_init_field(tword, + sa->auth_offset_eop, + CRYPT_SAD_AUTH_OFF_EOP_MASK, + CRYPT_SAD_AUTH_OFF_EOP_SHIFT, + "valid auth off eop"); + hw_sa->sa_word[3] = swap32_to_le(tword); + } + + /* Other Words */ + /* CCM CBC IV */ + if (sa->enc_type == AL_CRYPT_AES_CCM) { + al_crypto_sa_copy( + &hw_sa->sa_word[CRYPT_SAD_CCM_CBC_IV_ADD_SWORD], + sa->enc_ccm_cbc_iv_add, CRYPT_SAD_CCM_CBC_IV_ADD_SIZE); + } + + /* Encryption Key and IV, also relevant for GCM Auth */ + if ((sa->sa_op != AL_CRYPT_AUTH_ONLY) || + (sa->auth_type == AL_CRYPT_AUTH_AES_GCM)) { + al_crypto_sa_copy(&hw_sa->sa_word[CRYPT_SAD_ENC_KEY_SWORD], + sa->enc_key, CRYPT_SAD_ENC_KEY_SIZE); + al_crypto_sa_copy(&hw_sa->sa_word[CRYPT_SAD_ENC_IV_SWORD], + sa->enc_iv, CRYPT_SAD_ENC_IV_SIZE); + } + + /* AES GCM IV */ + if (sa->enc_type == AL_CRYPT_AES_GCM) { + al_crypto_sa_copy(&hw_sa->sa_word[CRYPT_SAD_GCM_AUTH_IV_SWORD], + sa->aes_gcm_auth_iv, CRYPT_SAD_GCM_AUTH_IV_SIZE); + } + + /* Authentication */ + if (sa->sa_op != AL_CRYPT_ENC_ONLY) { + if (sa->auth_hmac_en) { + al_crypto_sa_copy( + &hw_sa->sa_word[CRYPT_SAD_HMAC_IV_IN_SWORD], + sa->hmac_iv_in, CRYPT_SAD_HMAC_IV_IN_SIZE); + al_crypto_sa_copy( + &hw_sa->sa_word[CRYPT_SAD_HMAC_IV_OUT_SWORD], + sa->hmac_iv_out, CRYPT_SAD_HMAC_IV_OUT_SIZE); + } + + } + /* IV for broken Auth, overlap GCM feilds + which dont support broken Auth */ + if ((sa->sa_op == AL_CRYPT_AUTH_ONLY) && + (sa->auth_type != AL_CRYPT_AUTH_AES_GCM)) { + al_crypto_sa_copy(&hw_sa->sa_word[CRYPT_SAD_AUTH_IV_SWORD], + sa->auth_iv_in, CRYPT_SAD_AUTH_IV_SIZE); + } + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +int al_crypto_dma_prepare(struct al_ssm_dma *dma, uint32_t qid, + struct al_crypto_transaction *xaction) +{ + uint32_t rx_descs; + uint32_t tx_descs; + struct al_udma_q *rx_udma_q; + struct al_udma_q *tx_udma_q; + int rc; + + al_debug("al_crypto_dma_prepare\n"); + al_print_crypto_xaction(xaction); + + /* Check some parameters */ + /* SA out -> SA in */ + al_assert(!xaction->sa_out.len || + (xaction->sa_out.len && xaction->sa_in.len)); + /* Valid SA index */ + al_assert(!(xaction->sa_indx >> TX_DESC_META_SA_IDX_SHIFT + & ~TX_DESC_META_SA_IDX_MASK)); + /* Auth first has no iv_in */ + al_assert(!(xaction->auth_fl_valid && + xaction->auth_first && xaction->auth_iv_in.len)); + /* No last -> No sign_in */ + al_assert(!(xaction->auth_fl_valid && + !xaction->auth_last && xaction->auth_sign_in.len)); + /* Queue is for crypt/auth transactions */ + al_assert(dma->q_types[qid] == AL_CRYPT_AUTH_Q); + + /* calc tx (M2S) descriptors */ + tx_descs = al_crypto_xaction_tx_descs_count(xaction); + rc = al_udma_q_handle_get(&dma->m2m_udma.tx_udma, qid, &tx_udma_q); + /* valid crypto tx q handle */ + al_assert(!rc); + if 
(unlikely(al_udma_available_get(tx_udma_q) < tx_descs + + AL_CRYPT_DESC_RES)) { + al_dbg("crypt[%s]:tx q has no enough free desc", + dma->m2m_udma.name); + return -ENOSPC; + } + + /* calc rx (S2M) descriptors, at least one desc is required */ + rx_descs = al_crypto_xaction_rx_descs_count(xaction); + rc = al_udma_q_handle_get(&dma->m2m_udma.rx_udma, qid, &rx_udma_q); + /* valid crypto rx q handle */ + al_assert(!rc); + if (unlikely(al_udma_available_get(rx_udma_q) + < (rx_descs ? rx_descs : 1))) { + al_dbg("crypto [%s]: rx q has no enough free desc", + dma->m2m_udma.name); + return -ENOSPC; + } + + /* prepare rx descs */ + al_crypto_set_rx_descs(rx_udma_q, xaction, rx_descs); + /* add rx descriptors */ + al_udma_desc_action_add(rx_udma_q, rx_descs ? rx_descs : 1); + + /* prepare tx descriptors */ + al_crypto_set_tx_descs(tx_udma_q, xaction, tx_descs); + + /* set number of tx descriptors */ + xaction->tx_descs_count = tx_descs; + + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +int al_crypto_dma_action(struct al_ssm_dma *dma, uint32_t qid, + int tx_descs) +{ + struct al_udma_q *tx_udma_q; + int rc; + + rc = al_udma_q_handle_get(&dma->m2m_udma.tx_udma, qid, &tx_udma_q); + /* valid CRC/CSUM tx q handle */ + al_assert(!rc); + + /* add tx descriptors */ + al_udma_desc_action_add(tx_udma_q, tx_descs); + return 0; +} + +/****************************************************************************** + *****************************************************************************/ +int al_crypto_dma_completion(struct al_ssm_dma *dma, uint32_t qid, + uint32_t *comp_status) +{ + struct al_udma_q *rx_udma_q; + struct al_udma_q *tx_udma_q; + volatile union al_udma_cdesc *cdesc; + int rc; + uint32_t cdesc_count; + + rc = al_udma_q_handle_get(&dma->m2m_udma.rx_udma, qid, &rx_udma_q); + /* valid comp rx q handle */ + al_assert(!rc); + + cdesc_count = al_udma_cdesc_packet_get(rx_udma_q, &cdesc); + if (!cdesc_count) + return 0; + + /* if we have multiple completion descriptors, + then last one will have the valid status */ + if (unlikely(cdesc_count > 1)) + cdesc = al_cdesc_next(rx_udma_q, cdesc, cdesc_count - 1); + + *comp_status = swap32_from_le(cdesc->al_desc_comp_rx.ctrl_meta) & + RX_COMP_STATUS_MASK; + + al_udma_cdesc_ack(rx_udma_q, cdesc_count); + + al_debug("crypto packet completed. count %d status desc %p meta %x\n", + cdesc_count, cdesc, cdesc->al_desc_comp_rx.ctrl_meta); + + /* cleanup tx completion queue */ + rc = al_udma_q_handle_get(&dma->m2m_udma.tx_udma, qid, &tx_udma_q); + /* valid comp tx q handle */ + al_assert(!rc); + + cdesc_count = al_udma_cdesc_get_all(tx_udma_q, NULL); + if (cdesc_count) + al_udma_cdesc_ack(tx_udma_q, cdesc_count); + + return 1; +} diff --git a/target/linux/alpine/files/drivers/dma/al/Kconfig b/target/linux/alpine/files/drivers/dma/al/Kconfig new file mode 100644 index 00000000000000..c9b025fef84818 --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/Kconfig @@ -0,0 +1,20 @@ +config AL_DMA + tristate "Annapurna Labs DMA support" + depends on ARCH_ALPINE + select DMA_ENGINE + select ASYNC_TX_ENABLE_CHANNEL_SWITCH + help + Enable support for the Annapurna Labs DMA and RAID acceleration + engine. + +config AL_DMA_STATS + bool "Annapurna Labs DMA statistics enabled" + depends on AL_DMA + help + Enable Annapurna Labs DMA and RAID acceleration engine statistics. 
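+
+# Illustrative .config fragment: building the engine with statistics enabled
+# but without the Virtual Function support would typically result in:
+#
+#   CONFIG_AL_DMA=y
+#   CONFIG_AL_DMA_STATS=y
+#   # CONFIG_AL_DMA_PCI_IOV is not set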
+
+config AL_DMA_PCI_IOV
+ bool "Annapurna Labs DMA Virtual Function enabled"
+ depends on AL_DMA
+ help
+ Enable Annapurna Labs DMA Virtual Function.
diff --git a/target/linux/alpine/files/drivers/dma/al/Makefile b/target/linux/alpine/files/drivers/dma/al/Makefile
new file mode 100644
index 00000000000000..e38008857779a4
--- /dev/null
+++ b/target/linux/alpine/files/drivers/dma/al/Makefile
@@ -0,0 +1,9 @@
+ccflags-y := -I$(srctree)/arch/arm/mach-alpine/include
+
+obj-$(CONFIG_AL_DMA) += al_dma.o
+
+al_dma-objs := al_dma_main.o al_dma_core.o al_dma_cleanup.o al_dma_sysfs.o \
+ al_dma_prep_interrupt.o al_dma_prep_memcpy.o \
+ al_dma_prep_memset.o al_dma_prep_xor.o al_dma_prep_xor_val.o \
+ al_dma_prep_pq.o al_dma_prep_pq_val.o al_hal_ssm_raid.o \
+ al_dma_module_params.o
diff --git a/target/linux/alpine/files/drivers/dma/al/README b/target/linux/alpine/files/drivers/dma/al/README
new file mode 100644
index 00000000000000..553cbfbb3a2aeb
--- /dev/null
+++ b/target/linux/alpine/files/drivers/dma/al/README
@@ -0,0 +1,236 @@
+Linux driver for Annapurna Labs DMA device
+
+Architecture:
+=============
+
+This driver implements a standard Linux DMA device; the kernel communicates
+with the driver using the dma_device structure (defined in
+include/linux/dmaengine.h). The DMA device is implemented as an integrated
+PCI-E endpoint, hence the driver uses the PCI interface for probing the device
+and various other management functions.
+
+The driver communicates with the hardware using the Annapurna Labs Storage and
+Memory Services Acceleration Engine and UDMA HAL drivers.
+
+Internal Data Structures:
+=========================
+al_dma_device:
+--------------
+ This structure holds all the information needed to operate the adapter.
+ Fields:
+ - pdev: pointer to Linux PCI device structure
+ - raid_dma_params: data structure used to pass various parameters to the HAL
+ - udma_regs_base: UDMA registers base address
+ - app_regs_base: engine internal registers base address
+ - hal_raid: the HAL structure used to manage the adapter
+ - common: Linux DMA device structure
+ - irq_tbl: array of al_dma_irq; each interrupt used by the driver has an
+ entry in this array.
+ - msix_entries: pointer to the Linux data structure used to communicate to
+ the kernel which entries to use for MSI-X, and which irqs the kernel
+ assigned for those interrupts.
+ - channels: an array of channel information
+ - max_channels: the number of existing channels
+ - cache: kmem cache for allocating ring entries
+
+al_dma_chan:
+------------
+ This structure is used for saving the context of a single channel.
+ Fields:
+ - common: Linux DMA channel structure
+ - hal_raid: the HAL structure used to manage the adapter
+ - idx: the channel's index
+ - device: the parent device
+ - affinity_mask: cpu irq affinity mask
+ - sw_ring: SW descriptor ring
+ - tx_descs_num: number of descriptors in TX queue
+ - tx_dma_desc_virt: TX descriptor ring
+ - tx_dma_desc: TX descriptor ring physical base address
+ - rx_descs_num: number of descriptors in RX queue
+ - rx_dma_desc_virt: RX descriptor ring
+ - rx_dma_desc: RX descriptor ring physical base address
+ - rx_dma_cdesc_virt: RX completion descriptors ring
+ - rx_dma_cdesc: RX completion descriptors ring physical address
+ - kobj: sysfs kobj
+ - alloc_order: channel allocation order (log2 of the size)
+ - prep_lock: channel transaction preparation lock
+ - head: SW ring head
+ - sw_desc_num_locked: number of SW descriptors locked
+ - tx_desc_produced: number of tx descriptors produced and not issued
+ - stats_prep: preparation statistics
+ - cleanup_lock: operation completion cleanup lock
+ - cleanup_task: operation completion cleanup tasklet
+ - completed_cookie: completed cookie
+ - tail: SW ring tail
+ - stats_comp: operation completion statistics
+
+al_dma_chan_stats_prep:
+-----------------------
+ DMA channel statistics - preparation
+ Fields:
+ - int_num: Total number of interrupt requests
+ - memcpy_num: Total number of memcpy operations
+ - memcpy_size: Total size of memcpy operations
+ - memset_num: Total number of memset operations
+ - memset_size: Total size of memset operations
+ - xor_num: Total number of xor operations
+ - xor_size: Total size of xor operations
+ - pq_num: Total number of pq operations
+ - pq_size: Total size of pq operations
+ - pq_val_num: Total number of pq validation operations
+ - pq_val_size: Total size of pq validation operations
+ - xor_val_num: Total number of xor validation operations
+ - xor_val_size: Total size of xor validation operations
+ - matching_cpu: Number of preparations with matching queue and cpu
+ - mismatching_cpu: Number of preparations with mismatching queue and cpu
+
+al_dma_chan_stats_comp:
+-----------------------
+ DMA channel statistics - completion
+ Fields:
+ - redundant_int_cnt: Total number of redundant interrupts (interrupts for
+ which there were no completions)
+ - matching_cpu: Number of completions with matching queue and cpu
+ - mismatching_cpu: Number of completions with mismatching queue and cpu
+
+Interrupts mode:
+================
+The Annapurna Labs Storage and Memory Services Acceleration Engine supports the
+TrueMultiCore(TM) technology and is based on the Annapurna Labs Unified DMA
+(aka UDMA); thus it has an interrupt controller that can generate a legacy
+level-sensitive interrupt or, alternatively, an MSI-X interrupt for each cause
+bit.
+
+The driver first tries to work in per-queue MSI-X mode for optimal performance,
+with an MSI-X interrupt for each channel.
+If it fails to enable the per-queue MSI-X mode, it tries to use a single MSI-X
+interrupt for all the events. If that fails too, it falls back to a single
+legacy level-sensitive interrupt wire for all the events.
+
+The system's interrupt status can be viewed via the /proc/interrupts pseudo
+file.
+When legacy mode is used, the registered interrupt name will be:
+al-dma-intx-all@pci:
+When single MSI-X interrupt mode is used, the registered interrupt name will
+be:
+al-dma-msix-all@pci:
+and when per-queue MSI-X mode is used, for each channel an interrupt will be
+registered with the following name:
+al-dma-comp-.
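+
+As a rough sketch of that fallback order (illustrative only - pdev and
+max_channels stand for the driver's PCI device and channel count, and the
+driver itself manages struct msix_entry tables rather than this modern
+helper), the selection on a recent kernel could look like:
+
+    /* Try per-channel MSI-X first, then one shared MSI-X vector, then INTx */
+    int nvec = pci_alloc_irq_vectors(pdev, max_channels, max_channels,
+                                     PCI_IRQ_MSIX);
+    if (nvec < 0)
+        nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+    if (nvec < 0)
+        nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);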
+
+When working in interrupt-per-channel mode, the irq affinity is set so that
+there is a dedicated CPU per channel irq - this corresponds to the DMA engine
+behavior of balancing the channels between the CPUs, so that channel X goes to
+CPU X.
+
+Memory allocations:
+===================
+Cache coherent buffers are allocated for the following DMA rings:
+- TX submission ring
+- RX submission ring
+- RX completion ring
+kmem cache buffers are used for the SW rings.
+All these buffers are allocated upon channel creation and freed upon channel
+destruction.
+
+MULTIQUEUE:
+===========
+As part of the TrueMultiCore(TM) technology, the driver supports multiqueue
+mode. This mode has various benefits when channels are allocated to different
+CPU cores/threads:
+1. Reduced CPU/thread/process contention on a given channel
+2. Reduced cache miss rate on transaction completion
+3. In-hardware interrupt redirection
+
+Channel selection: the driver is optimized for the case where the number of
+CPUs equals the number of channels; in this case, each CPU is mapped to a
+single channel. This mapping is done by the DMA engine Linux layer.
+
+Interrupts affinity:
+--------------------
+In order to utilize the multiqueue benefits, the per-queue MSI-X mode should be
+used.
+In this mode the irq affinity is set so that there is a dedicated CPU per
+channel irq - the same CPU that triggered the transaction and "owns" the
+channel.
+
+Locks and atomic variables:
+===========================
+The following locks and atomic variables are used in the driver:
+- Prep lock for locking sw ring (al_dma_chan->prep_lock)
+- Cleanup lock for completion ring in each channel (al_dma_chan->cleanup_lock)
+
+Work flow:
+==========
+1. The stack maps the source and destination buffers
+2. The stack calls al_dma_prep_xxx_lock, which does the following:
+ - Populates the HAL structure used for the transaction
+ - Updates relevant statistics
+ - Adds the transaction to the SW ring
+3. The stack calls al_dma_tx_submit_unlock, which adds the transaction to the
+ TX UDMA hardware
+4. When the UDMA hardware finishes the transaction, it raises an RX completion
+ interrupt, which schedules a completion tasklet
+5. 
The completion tasklet performs the following:
+ - Acknowledges all completions
+ - Updates relevant statistics
+ - Unmaps buffers, except those marked to skip unmapping
+ - Calls the transaction callback defined by the stack
+
+File structure
+==============
+
+Module init and PCI registration
+--------------------------------
+./al_dma.c
+
+Driver core
+-----------
+./al_dma_core.c
+./al_dma.h
+
+Operation cleanup (upon completion)
+-----------------------------------
+./al_dma_cleanup.c
+
+Operation preparation
+---------------------
+./al_dma_prep.h
+./al_dma_prep_interrupt.c
+./al_dma_prep_memcpy.c
+./al_dma_prep_memset.c
+./al_dma_prep_pq.c
+./al_dma_prep_pq_val.c
+./al_dma_prep_xor.c
+./al_dma_prep_xor_val.c
+
+/sys FS registration
+--------------------
+./al_dma_sysfs.c
+./al_dma_sysfs.h
+
+Hardware abstraction layer
+--------------------------
+./al_hal_raid_accelerator_regs.h
+./al_hal_raid.c
+./al_hal_raid.h
+
+Misc
+----
+./README
+./Makefile
+
+
+TODO:
+=====
+ - get number of channels/max length from device information
+ - implement intx isr
+ - interrupt registers configuration
+ - error handling
+ - self test
+ - Clean up the TODOs spread in the code
+ - Add support for insmod/rmmod
+ - Add support for enabling Group D interrupts (errors) and reporting them
+ - SR-IOV support
+ - Add statistics that can later imply how the driver should be optimized:
+ max size request, min size request, number of req < 4k,
+ number of req > 4k and < 16k, etc.
+
diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma.h b/target/linux/alpine/files/drivers/dma/al/al_dma.h
new file mode 100644
index 00000000000000..5d908632e2805d
--- /dev/null
+++ b/target/linux/alpine/files/drivers/dma/al/al_dma.h
@@ -0,0 +1,407 @@
+/*
+ * Annapurna Labs DMA Linux driver
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */ +#ifndef AL_DMA_H +#define AL_DMA_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "al_hal_ssm_raid.h" + +#define AL_DMA_VERSION "0.01" + +#define AL_DMA_IRQNAME_SIZE 40 + +#define AL_DMA_MAX_SIZE_SHIFT_MEMCPY 16 /* 64KB */ +#define AL_DMA_MAX_SIZE_SHIFT_MEMSET 16 /* 64KB */ +#define AL_DMA_MAX_SIZE_SHIFT_XOR 14 /* 16KB */ +#define AL_DMA_MAX_SIZE_SHIFT_XOR_VAL 14 /* 16KB */ +#define AL_DMA_MAX_SIZE_SHIFT_PQ 13 /* 8KB */ +#define AL_DMA_MAX_SIZE_SHIFT_PQ_VAL 13 /* 8KB */ + +#define AL_DMA_ALIGN_SHIFT 0 /* No alignment requirements */ + +#ifndef CONFIG_ALPINE_VP_WA +#define AL_DMA_RAID_TX_CDESC_SIZE 8 +#define AL_DMA_RAID_RX_CDESC_SIZE 8 +#else +/* Currently in VP it is always 16 bytes */ +#define AL_DMA_RAID_TX_CDESC_SIZE 16 +#define AL_DMA_RAID_RX_CDESC_SIZE 16 +#endif + +#define AL_DMA_MAX_SIZE_MEMCPY (1 << AL_DMA_MAX_SIZE_SHIFT_MEMCPY) +#define AL_DMA_MAX_SIZE_MEMSET (1 << AL_DMA_MAX_SIZE_SHIFT_MEMSET) +#define AL_DMA_MAX_SIZE_XOR (1 << AL_DMA_MAX_SIZE_SHIFT_XOR) +#define AL_DMA_MAX_SIZE_XOR_VAL (1 << AL_DMA_MAX_SIZE_SHIFT_XOR_VAL) +#define AL_DMA_MAX_SIZE_PQ (1 << AL_DMA_MAX_SIZE_SHIFT_PQ) +#define AL_DMA_MAX_SIZE_PQ_VAL (1 << AL_DMA_MAX_SIZE_SHIFT_PQ_VAL) + +#define AL_DMA_MAX_XOR AL_SSM_MAX_SRC_DESCS + +#define AL_DMA_OP_MAX_BLOCKS (AL_DMA_MAX_XOR * 2) + +#define AL_DMA_MAX_CHANNELS 4 + +#define AL_DMA_SW_RING_MIN_ORDER 4 +#define AL_DMA_SW_RING_MAX_ORDER 16 + +/** + * Issue pending transaction upon sumbit: + * 0 - no, issue when issue_pending is called + * 1 - yes, and do nothing when issue_pending is called + */ +#define AL_DMA_ISSUE_PNDNG_UPON_SUBMIT 1 + +/*#define AL_DMA_MEMCPY_VALIDATION*/ +/*#define AL_DMA_XOR_VALIDATION*/ + +#ifdef CONFIG_AL_DMA_STATS +#define AL_DMA_STATS_INC(var, incval) { (var) += (incval); } + +#define AL_DMA_STATS_UPDATE(chan, num, cnt, size, size_inc) \ +{ \ + AL_DMA_STATS_INC((num), (cnt)); \ + \ + if (size_inc) \ + AL_DMA_STATS_INC((size), (size_inc)); \ + \ + AL_DMA_STATS_INC( \ + (chan)->stats_prep.matching_cpu, \ + (cnt) * (((chan)->idx == smp_processor_id()))); \ + \ + AL_DMA_STATS_INC( \ + (chan)->stats_prep.mismatching_cpu, \ + (cnt) * (!((chan)->idx == smp_processor_id()))); \ +} +#else +#define AL_DMA_STATS_INC(var, incval) +#define AL_DMA_STATS_UPDATE(chan, num, cnt, size, size_inc) +#endif + +enum al_unmap_type { + AL_UNMAP_SINGLE, + AL_UNMAP_PAGE, +}; + +struct al_dma_unmap_info_ent { + dma_addr_t handle; + size_t size; + int dir; + enum al_unmap_type type; +}; + +/** + * struct al_dma_sw_desc - software descriptor + */ +struct al_dma_sw_desc { + struct al_raid_transaction hal_xaction; + struct al_block blocks[AL_DMA_OP_MAX_BLOCKS]; + struct al_buf bufs[AL_DMA_OP_MAX_BLOCKS]; + + size_t len; + struct dma_async_tx_descriptor txd; + #ifdef DEBUG + int id; + #endif + + int last_is_pq_val; + enum sum_check_flags *pq_val_res; + + int last_is_xor_val; + enum sum_check_flags *xor_val_res; + + int last_is_memcpy; + +#ifdef AL_DMA_MEMCPY_VALIDATION + void *memcpy_dest; + void *memcpy_src; + size_t memcpy_len; +#endif + + int last_is_xor; + +#ifdef AL_DMA_XOR_VALIDATION + void *xor_dest; + int xor_src_cnt; + void *xor_src[AL_DMA_OP_MAX_BLOCKS]; + size_t xor_len; +#endif + + struct al_dma_unmap_info_ent unmap_info[AL_DMA_OP_MAX_BLOCKS]; + int umap_ent_cnt; +}; +#define to_al_dma_device(dev) container_of(dev, struct al_dma_device, common) +#define to_dev(al_dma_chan) (&(al_dma_chan)->device->pdev->dev) + +#ifdef CONFIG_AL_DMA_STATS +/** + * struct al_dma_chan_stats_prep - DMA channel statistics - 
preparation + * @int_num - Total number of interrupt requests + * @memcpy_num - Total number of memcpy operations + * @memcpy_size - Total size of memcpy operations + * @memset_num - Total number of memset operations + * @memset_size - Total size of memset operations + * @xor_num - Total number of xor operations + * @xor_size - Total size of xor operations + * @pq_num - Total number of pq operations + * @pq_size - Total size of pq operations + * @pq_val_num - Total number of pq validation operations + * @pq_val_size - Total size of pq validation operations + * @xor_val_num - Total number of xor validation operations + * @xor_val_size - Total size of xor validation operations + * @matching_cpu - Number of preparations with matching queue and cpu + * @mismatching_cpu - Number of preparations with mismatching queue and cpu + */ +struct al_dma_chan_stats_prep { + uint64_t int_num; + uint64_t memcpy_num; + uint64_t memcpy_size; + uint64_t sg_memcpy_num; + uint64_t sg_memcpy_size; + uint64_t memset_num; + uint64_t memset_size; + uint64_t xor_num; + uint64_t xor_size; + uint64_t pq_num; + uint64_t pq_size; + uint64_t pq_val_num; + uint64_t pq_val_size; + uint64_t xor_val_num; + uint64_t xor_val_size; + uint64_t matching_cpu; + uint64_t mismatching_cpu; +}; + +/** + * struct al_dma_chan_stats_prep - DMA channel statistics - completion + * @redundant_int_cnt - Total number of redundant interrupts (interrupts for + * which there was no completions + * @matching_cpu - Number of completions with matching queue and cpu + * @mismatching_cpu - Number of completions with mismatching queue and cpu + */ +struct al_dma_chan_stats_comp { + uint64_t redundant_int_cnt; + uint64_t matching_cpu; + uint64_t mismatching_cpu; +}; +#endif + +/* internal structure for AL Crypto IRQ + */ +struct al_dma_irq { + char name[AL_DMA_IRQNAME_SIZE]; +}; + +/** + * struct al_dma_device - internal representation of a DMA device + */ +struct al_dma_device { + struct pci_dev *pdev; + u16 dev_id; + u8 rev_id; + + struct al_ssm_dma_params ssm_dma_params; + void __iomem *udma_regs_base; + void __iomem *app_regs_base; + struct al_ssm_dma hal_raid; + + struct dma_device common; + + struct msix_entry msix_entries[AL_DMA_MAX_CHANNELS]; + struct al_dma_irq irq_tbl[AL_DMA_MAX_CHANNELS]; + struct al_dma_chan *channels[AL_DMA_MAX_CHANNELS]; + int max_channels; + + struct kmem_cache *cache; +}; + +/** + * struct al_dma_chan - internal representation of a DMA channel + */ +struct al_dma_chan { + /* Misc */ + struct dma_chan common ____cacheline_aligned; + struct al_ssm_dma *hal_raid; + int idx; + struct al_dma_device *device; + cpumask_t affinity_mask; + + /* SW descriptors ring */ + struct al_dma_sw_desc **sw_ring; + + /* Tx UDMA hw ring */ + int tx_descs_num; /* number of descriptors in Tx queue */ + void *tx_dma_desc_virt; /* Tx descriptors ring */ + dma_addr_t tx_dma_desc; + + /* Rx UDMA hw ring */ + int rx_descs_num; /* number of descriptors in Rx queue */ + void *rx_dma_desc_virt; /* Rx descriptors ring */ + dma_addr_t rx_dma_desc; + void *rx_dma_cdesc_virt; /* Rx completion descriptors ring */ + dma_addr_t rx_dma_cdesc; + + /* sysfs */ + struct kobject kobj; + + /* Channel allocation */ + u16 alloc_order; + + /* Preparation */ + spinlock_t prep_lock ____cacheline_aligned; + u16 head; + int sw_desc_num_locked; + uint32_t tx_desc_produced; +#ifdef CONFIG_AL_DMA_STATS + struct al_dma_chan_stats_prep stats_prep; +#endif + + /* Completion */ + spinlock_t cleanup_lock ____cacheline_aligned_in_smp; + struct tasklet_struct cleanup_task; + u16 
tail; +#ifdef CONFIG_AL_DMA_STATS + struct al_dma_chan_stats_comp stats_comp; +#endif +}; + +static inline u16 al_dma_ring_size(struct al_dma_chan *chan) +{ + return 1 << chan->alloc_order; +} + +/* count of transactions in flight with the engine */ +static inline u16 al_dma_ring_active(struct al_dma_chan *chan) +{ + return CIRC_CNT(chan->head, chan->tail, al_dma_ring_size(chan)); +} + +static inline u16 al_dma_ring_space(struct al_dma_chan *chan) +{ + return CIRC_SPACE(chan->head, chan->tail, al_dma_ring_size(chan)); +} + +static inline struct al_dma_sw_desc * +al_dma_get_ring_ent(struct al_dma_chan *chan, u16 idx) +{ + return chan->sw_ring[idx & (al_dma_ring_size(chan) - 1)]; +} + +struct al_dma_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct dma_chan *, char *); +}; + +static inline struct al_dma_chan *to_al_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct al_dma_chan, common); +} + +/* wrapper around hardware descriptor format + additional software fields */ + + + +#ifdef DEBUG +#define set_desc_id(desc, i) ((desc)->id = (i)) +#define desc_id(desc) ((desc)->id) +#else +#define set_desc_id(desc, i) +#define desc_id(desc) (0) +#endif + +static inline struct al_dma_chan * +al_dma_chan_by_index(struct al_dma_device *device, int index) +{ + return device->channels[index]; +} + +static inline u32 al_dma_chansts(struct al_dma_chan *chan) +{ + u32 status = 0; + + return status; +} + +static inline void al_dma_unmap_info_ent_set( + struct al_dma_unmap_info_ent *ent, + dma_addr_t handle, + size_t size, + int dir, + enum al_unmap_type type) +{ + ent->handle = handle; + ent->size = size; + ent->dir = dir; + ent->type = type; +} + +int al_dma_get_sw_desc_lock( + struct al_dma_chan *chan, + int num); + +int al_dma_core_init( + struct al_dma_device *device, + void __iomem *iobase_udma, + void __iomem *iobase_app); + +int al_dma_fast_init( + struct al_dma_device *device, + void __iomem *iobase_udma); + +int al_dma_fast_terminate( + struct al_dma_device *device); + +int al_dma_core_terminate( + struct al_dma_device *device); + +int al_dma_cleanup_fn( + struct al_dma_chan *chan, + int from_tasklet); + +int udma_fast_memcpy(int len, al_phys_addr_t src, al_phys_addr_t dst); + +void al_dma_flr( + struct pci_dev *pdev); + +/** + * Submit pending SW descriptors (enlarge the head) and unlock the prep-lock + * in the case 'issue-pending' is responsible for submitting the HW descriptors + */ +void al_dma_tx_submit_sw_cond_unlock( + struct al_dma_chan *chan, + struct dma_async_tx_descriptor *tx); + +void al_dma_kobject_add(struct al_dma_device *device, struct kobj_type *type); +void al_dma_kobject_del(struct al_dma_device *device); +extern const struct sysfs_ops al_dma_sysfs_ops; +extern struct al_dma_sysfs_entry al_dma_version_attr; +extern struct al_dma_sysfs_entry al_dma_cap_attr; + +#endif /* AL_DMA_H */ + diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma_cleanup.c b/target/linux/alpine/files/drivers/dma/al/al_dma_cleanup.c new file mode 100644 index 00000000000000..9d605ff7d529b1 --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_dma_cleanup.c @@ -0,0 +1,314 @@ +/* + * Annapurna Labs DMA Linux driver - Operation completion cleanup + * Copyright(c) 2011 Annapurna Labs. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include +#include "../dmaengine.h" +#include "al_dma.h" + +#define smp_read_barrier_depends() do {} while(0) + +static void al_dma_cleanup_single( + struct al_dma_chan *chan, + struct al_dma_sw_desc *desc, + uint32_t comp_status); + +static inline void al_dma_cleanup_single_memcpy( + struct al_dma_chan *chan, + struct al_dma_sw_desc *desc); + +static inline void al_dma_cleanup_single_xor( + struct al_dma_chan *chan, + struct al_dma_sw_desc *desc); + +static inline void al_dma_cleanup_single_pq_val( + struct al_dma_chan *chan, + struct al_dma_sw_desc *desc, + uint32_t comp_status); + +static inline void al_dma_cleanup_single_xor_val( + struct al_dma_chan *chan, + struct al_dma_sw_desc *desc, + uint32_t comp_status); + + +static void al_dma_cleanup_unmap( + struct al_dma_chan *chan, + struct al_dma_sw_desc *desc); + +/****************************************************************************** + *****************************************************************************/ +int al_dma_cleanup_fn( + struct al_dma_chan *chan, + int from_tasklet) +{ + struct al_dma_sw_desc *desc; + uint32_t comp_status; + u16 active; + int idx, i; + uint32_t rc; + + dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x\n", + __func__, chan->head, chan->tail); + + spin_lock_bh(&chan->cleanup_lock); + + idx = chan->tail; + + active = al_dma_ring_active(chan); + for (i = 0; i < active; i++) { + rc = al_raid_dma_completion(chan->hal_raid, chan->idx, + &comp_status); + + /* if no completed transaction found -> exit */ + if (rc == 0) { + dev_dbg(to_dev(chan), "%s: No completion\n", + __func__); + + break; + } + + dev_dbg( + to_dev(chan), + "%s: completion status: %u\n", + __func__, + comp_status); + + /* This will instruct the CPU to make sure the index is up to + date before reading the new item */ + smp_read_barrier_depends(); + + desc = al_dma_get_ring_ent(chan, idx + i); + + al_dma_cleanup_single(chan, desc, comp_status); + } + + /* This will make sure the CPU has finished reading the item + before it writes the new tail pointer, which will erase the item */ + smp_mb(); + + chan->tail = idx + i; + + AL_DMA_STATS_INC(chan->stats_comp.matching_cpu, + i * ((chan->idx == smp_processor_id()))); + AL_DMA_STATS_INC(chan->stats_comp.mismatching_cpu, + i * (!(chan->idx == smp_processor_id()))); + + /* Keep track of redundant interrupts - interrupts that don't + yield completions */ + if (unlikely(from_tasklet && (!i))) { + AL_DMA_STATS_INC(chan->stats_comp.redundant_int_cnt, 1); + } + + spin_unlock_bh(&chan->cleanup_lock); + + return i; +} + +/****************************************************************************** + *****************************************************************************/ +static inline void al_dma_cleanup_single( + struct al_dma_chan *chan, + struct al_dma_sw_desc *desc, + uint32_t comp_status) +{ + struct dma_async_tx_descriptor *tx; + + if (desc->last_is_memcpy) { + desc->last_is_memcpy = 0; +
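/* the last_is_* flag is cleared before dispatch so a descriptor recycled from the sw ring is not post-processed twice */ +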
al_dma_cleanup_single_memcpy(chan, desc); + } else if (desc->last_is_xor) { + desc->last_is_xor = 0; + al_dma_cleanup_single_xor(chan, desc); + } else if (desc->last_is_pq_val) { + desc->last_is_pq_val = 0; + al_dma_cleanup_single_pq_val(chan, desc, comp_status); + } else if (desc->last_is_xor_val) { + desc->last_is_xor_val = 0; + al_dma_cleanup_single_xor_val(chan, desc, comp_status); + } + + tx = &desc->txd; + if (tx->cookie) { + dma_cookie_complete(tx); + al_dma_cleanup_unmap(chan, desc); + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; + } + } +} + +/****************************************************************************** + *****************************************************************************/ +static inline void al_dma_cleanup_unmap( + struct al_dma_chan *chan, + struct al_dma_sw_desc *desc) +{ + struct pci_dev *pdev = chan->device->pdev; + int i; + + for (i = 0; i < desc->umap_ent_cnt; i++) { + struct al_dma_unmap_info_ent *ent = &desc->unmap_info[i]; + + switch (ent->type) { + case AL_UNMAP_SINGLE: + dma_unmap_single( + &pdev->dev, ent->handle, ent->size, ent->dir); + break; + case AL_UNMAP_PAGE: + dma_unmap_page(&pdev->dev, ent->handle, ent->size, ent->dir); + break; + } + } +} + +/****************************************************************************** + *****************************************************************************/ +static inline void al_dma_cleanup_single_memcpy( + struct al_dma_chan *chan, + struct al_dma_sw_desc *desc) +{ +#ifdef AL_DMA_MEMCPY_VALIDATION + if (memcmp(desc->memcpy_dest, desc->memcpy_src, desc->memcpy_len)) { + dev_err( + to_dev(chan), + "%s: memcpy (%p, %p, %d) failed!\n", + __func__, + desc->memcpy_dest, + desc->memcpy_src, + desc->memcpy_len); + } else + dev_dbg( + to_dev(chan), + "%s: memcpy (%p, %p, %d) ok!\n", + __func__, + desc->memcpy_dest, + desc->memcpy_src, + desc->memcpy_len); +#endif +} + +/****************************************************************************** + *****************************************************************************/ +static inline void al_dma_cleanup_single_pq_val( + struct al_dma_chan *chan, + struct al_dma_sw_desc *desc, + uint32_t comp_status) +{ + if (unlikely(comp_status & AL_RAID_P_VAL_ERROR)) { + dev_dbg( + to_dev(chan), + "%s: pq_val failed P!\n", + __func__); + + (*desc->pq_val_res) |= SUM_CHECK_P_RESULT; + } + + if (unlikely(comp_status & AL_RAID_Q_VAL_ERROR)) { + dev_dbg( + to_dev(chan), + "%s: pq_val failed Q!\n", + __func__); + + (*desc->pq_val_res) |= SUM_CHECK_Q_RESULT; + } +} + +/****************************************************************************** + *****************************************************************************/ +static inline void al_dma_cleanup_single_xor( + struct al_dma_chan *chan, + struct al_dma_sw_desc *desc) +{ +#ifdef AL_DMA_XOR_VALIDATION + int src_index; + int dest_index; + + for ( + dest_index = 0; + dest_index < desc->xor_len; + dest_index++) { + uint8_t byte_val = + ((uint8_t *)desc->xor_dest)[dest_index]; + + for ( + src_index = 0; + src_index < desc->xor_src_cnt; + src_index++) { + byte_val ^= + ((uint8_t *)desc->xor_src[ + src_index])[dest_index]; + } + + if (byte_val) { + if (desc->xor_src_cnt != 3) + dev_err( + to_dev(chan), + "%s: xor(%p, %d srcs, %d) failed!\n", + __func__, + desc->xor_dest, + desc->xor_src_cnt, + desc->xor_len); + else + dev_err( + to_dev(chan), + "%s: xor(%p, %p, %p, %p, %d) failed!\n", + __func__, + desc->xor_dest, + desc->xor_src[0], + desc->xor_src[1], +
desc->xor_src[2], + desc->xor_len); + + break; + } + } + + if (dest_index == desc->xor_len) { + dev_dbg( + to_dev(chan), + "%s: xor (%p, %d, %d) ok!\n", + __func__, + desc->xor_dest, + desc->xor_len, + desc->xor_src_cnt); + } +#endif +} + +/****************************************************************************** + *****************************************************************************/ +static inline void al_dma_cleanup_single_xor_val( + struct al_dma_chan *chan, + struct al_dma_sw_desc *desc, + uint32_t comp_status) +{ + if (unlikely(comp_status & AL_RAID_P_VAL_ERROR)) { + dev_dbg( + to_dev(chan), + "%s: xor_val failed P!\n", + __func__); + + (*desc->xor_val_res) |= SUM_CHECK_P_RESULT; + } +} + diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma_core.c b/target/linux/alpine/files/drivers/dma/al/al_dma_core.c new file mode 100644 index 00000000000000..16e4c23311acf4 --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_dma_core.c @@ -0,0 +1,1619 @@ +/* + * Annapurna Labs DMA Linux driver core + * Copyright(c) 2011 Annapurna Labs. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../dmaengine.h" +#include "al_dma.h" +#include "al_dma_prep.h" +#include "al_dma_sysfs.h" +#include "al_dma_module_params.h" + +MODULE_LICENSE("GPL"); + +static dma_cookie_t al_dma_tx_submit_unlock( + struct dma_async_tx_descriptor *tx); + +static void al_dma_free_chan_resources( + struct dma_chan *c); + +static int al_dma_alloc_chan_resources( + struct dma_chan *c); + +static enum dma_status al_dma_tx_status( + struct dma_chan *c, + dma_cookie_t cookie, + struct dma_tx_state *txstate); + +static void al_dma_issue_pending( + struct dma_chan *c); + +static int al_dma_device_terminate_all(struct dma_chan *dchan); + +static int al_dma_device_config(struct dma_chan *dchan, + struct dma_slave_config *config); + + +static int al_dma_setup_interrupts( + struct al_dma_device *device); + +static void al_dma_free_interrupts( + struct al_dma_device *device); + +static irqreturn_t al_dma_do_interrupt( + int irq, + void *data); + +static irqreturn_t al_dma_do_interrupt_msix( + int irq, + void *data); + +static int al_dma_init_channels( + struct al_dma_device *device, + int max_channels); + +static void al_dma_init_channel( + struct al_dma_device *device, + struct al_dma_chan *chan, + int idx); + +static struct al_dma_sw_desc **al_dma_alloc_sw_ring( + struct al_dma_chan *chan, + int order, + gfp_t flags); + +static struct al_dma_sw_desc *al_dma_alloc_ring_ent( + struct al_dma_chan *chan, + gfp_t flags); + +static void al_dma_free_ring_ent( + struct al_dma_sw_desc *desc, + struct al_dma_chan *chan); + +static void al_dma_cleanup_tasklet(unsigned long data); + +/****************************************************************************** + *****************************************************************************/ +int al_dma_core_init( + struct al_dma_device *device, + void __iomem *iobase_udma, + void __iomem *iobase_app) +{ + int32_t rc; + + struct dma_device *dma = &device->common; + int err; + struct al_udma_m2s_pkt_len_conf pkt_len_conf; + struct al_udma *tx_udma; + int max_channels = al_dma_get_max_channels(); + + dev_dbg( + dma->dev, + "%s(%p, %p, %p)\n", + __func__, + device, + iobase_udma, + iobase_app); + + device->cache = kmem_cache_create( + "al_dma", + sizeof(struct al_dma_sw_desc), + 0, + SLAB_HWCACHE_ALIGN, + NULL); + if (!device->cache) + return -ENOMEM; + + device->max_channels = max_channels; + + device->udma_regs_base = iobase_udma; + device->app_regs_base = iobase_app; + + memset(&device->ssm_dma_params, 0, sizeof(struct al_ssm_dma_params)); + device->ssm_dma_params.dev_id = device->dev_id; + device->ssm_dma_params.rev_id = device->rev_id; + device->ssm_dma_params.udma_regs_base = device->udma_regs_base; + + device->ssm_dma_params.name = + kmalloc(strlen(dev_name(device->common.dev)) + 1, GFP_KERNEL); + if (device->ssm_dma_params.name == NULL) { + dev_err(device->common.dev, "kmalloc failed\n"); + return -1; + } + + memcpy( + device->ssm_dma_params.name, + dev_name(device->common.dev), + strlen(dev_name(device->common.dev)) + 1); + + device->ssm_dma_params.num_of_queues = max_channels; + + rc = al_ssm_dma_init(&device->hal_raid, &device->ssm_dma_params); + if (rc) { + dev_err(device->common.dev, "al_raid_dma_init failed\n"); + return rc; + } + + al_raid_init(device->app_regs_base); + + /* set max packet size to 512k (XOR with 32 sources) */ + rc = al_ssm_dma_handle_get( + &device->hal_raid, + UDMA_TX, + &tx_udma); + if (rc) { 
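/* the Tx UDMA handle is required for the packet size configuration below */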
+ dev_err(device->common.dev, "al_raid_dma_handle_get failed\n"); + return rc; + } + + pkt_len_conf.encode_64k_as_zero = AL_FALSE; + pkt_len_conf.max_pkt_size = SZ_512K; + rc = al_udma_m2s_packet_size_cfg_set(tx_udma, &pkt_len_conf); + if (rc) { + dev_err(device->common.dev, + "al_udma_m2s_packet_size_cfg_set failed\n"); + return rc; + } + + /* enumerate and initialize channels (queues) */ + al_dma_init_channels(device, max_channels); + + /* enable RAID DMA engine */ + rc = al_ssm_dma_state_set(&device->hal_raid, UDMA_NORMAL); + + dma->dev = &device->pdev->dev; + + dma->device_alloc_chan_resources = al_dma_alloc_chan_resources; + dma->device_free_chan_resources = al_dma_free_chan_resources; + dma->device_tx_status = al_dma_tx_status; + dma->device_issue_pending = al_dma_issue_pending; + dma->device_config = al_dma_device_config; + dma->device_terminate_all = al_dma_device_terminate_all; + + if (al_dma_get_op_support_interrupt()) { + dma_cap_set(DMA_INTERRUPT, dma->cap_mask); + dma->device_prep_dma_interrupt = al_dma_prep_interrupt_lock; + } + + if (al_dma_get_op_support_memcpy()) { + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + dma->device_prep_dma_memcpy = al_dma_prep_memcpy_lock; + } + + if (al_dma_get_op_support_memset()) { + dma_cap_set(DMA_MEMSET, dma->cap_mask); + dma->device_prep_dma_memset = al_dma_prep_memset_lock; + } + + if (al_dma_get_op_support_xor()) { + dma_cap_set(DMA_XOR, dma->cap_mask); + dma->device_prep_dma_xor = al_dma_prep_xor_lock; + dma->max_xor = AL_DMA_MAX_XOR; + } + + if (al_dma_get_op_support_pq()) { + dma_cap_set(DMA_PQ, dma->cap_mask); + dma->device_prep_dma_pq = al_dma_prep_pq_lock; + } + + if (al_dma_get_op_support_pq_val()) { +#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA + dma_cap_set(DMA_PQ_VAL, dma->cap_mask); + dma->device_prep_dma_pq_val = al_dma_prep_pq_val_lock; +#endif + } + + if (al_dma_get_op_support_pq()) + dma_set_maxpq(dma, AL_DMA_MAX_XOR - 2, 0); + else if (al_dma_get_op_support_pq_val()) { +#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA + dma_set_maxpq(dma, AL_DMA_MAX_XOR - 2, 0); +#endif + } + + if (al_dma_get_op_support_xor_val()) { +#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA + dma_cap_set(DMA_XOR_VAL, dma->cap_mask); + dma->device_prep_dma_xor_val = al_dma_prep_xor_val_lock; +#endif + } + +#ifdef CONFIG_ALPINE_VP_WA + dma->copy_align = AL_DMA_ALIGN_SHIFT; + dma->xor_align = AL_DMA_ALIGN_SHIFT; + dma->pq_align = AL_DMA_ALIGN_SHIFT; + dma->fill_align = AL_DMA_ALIGN_SHIFT; +#endif + +#ifdef CONFIG_DMATEST + /* Reserve for DMA test */ + dma_cap_set(DMA_PRIVATE, dma->cap_mask); +#endif + + err = al_dma_setup_interrupts(device); + + if (err) { + dev_err(device->common.dev, "failed to setup interrupts\n"); + return err; + } + + err = dma_async_device_register(&device->common); + + if (err) + dev_err(device->common.dev, "failed to register dma device\n"); + + return err; +} + + +/****************************************************************************** + ***************************** Fast DMA **************************************/ +#define FAST_DMA_NUM_OF_QUEUES 4 +#define FAST_DMA_MEMCPY_TIMEOUT 1000 /* in uSec */ +#define FAST_DMA_DESCS_COUNT 8 +#define FAST_DMA_TX_CDESCS_COUNT 8 +#define FAST_DMA_RX_CDESCS_COUNT 8 + +DEFINE_PER_CPU(struct al_udma_q *, tx_udma_q_percpu); +DEFINE_PER_CPU(struct al_udma_q *, rx_udma_q_percpu); +DEFINE_PER_CPU(uint32_t *, temp_percpu); +DEFINE_PER_CPU(al_phys_addr_t, temp_phys_addr_percpu); + +al_phys_addr_t tx_dma_desc_phys[FAST_DMA_NUM_OF_QUEUES]; +al_phys_addr_t rx_dma_desc_phys[FAST_DMA_NUM_OF_QUEUES]; 
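+/* one descriptor ring set per fast-DMA queue; ssm_udma_fast_init() binds one Tx/Rx queue pair per CPU, hence the assert below that FAST_DMA_NUM_OF_QUEUES covers NR_CPUS */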
+al_phys_addr_t rx_dma_cdesc_phys[FAST_DMA_NUM_OF_QUEUES]; +void *tx_dma_desc_virt[FAST_DMA_NUM_OF_QUEUES]; +void *rx_dma_desc_virt[FAST_DMA_NUM_OF_QUEUES]; +void *rx_dma_cdesc_virt[FAST_DMA_NUM_OF_QUEUES]; + +uint64_t al_pcie_read_addr_start[AL_SB_PCIE_NUM]; +uint64_t al_pcie_read_addr_end[AL_SB_PCIE_NUM]; +uint64_t al_pcie_write_addr_start[AL_SB_PCIE_NUM]; +uint64_t al_pcie_write_addr_end[AL_SB_PCIE_NUM]; +bool al_pcie_address_valid[AL_SB_PCIE_NUM] = {0}; + +bool fast_dma_init = false; + +/****************************************************************************** + *****************************************************************************/ +/* Prepare queue for fast mode */ +static void ssm_udma_fast_init(struct al_ssm_dma *ssm_dma, struct device* dev) +{ + struct al_memcpy_transaction xaction; + struct al_udma_q *tx_udma_q, *rx_udma_q; + uint32_t *temp; + al_phys_addr_t temp_phys_addr; + int cpu; + + for_each_possible_cpu(cpu) { + tx_udma_q = al_ssm_dma_tx_queue_handle_get(ssm_dma, cpu); + rx_udma_q = al_ssm_dma_rx_queue_handle_get(ssm_dma, cpu); + + memset(&xaction, 0, sizeof(struct al_memcpy_transaction)); + al_udma_fast_memcpy_q_prepare(tx_udma_q, rx_udma_q, &xaction); + + /* Allocate temp memory */ + temp = dma_alloc_coherent(dev, + sizeof(uint32_t), + &temp_phys_addr, + GFP_KERNEL); + + per_cpu(tx_udma_q_percpu, cpu) = tx_udma_q; + per_cpu(rx_udma_q_percpu, cpu) = rx_udma_q; + per_cpu(temp_percpu, cpu) = temp; + per_cpu(temp_phys_addr_percpu, cpu) = temp_phys_addr; + } +} + +static void ssm_udma_fast_terminate(struct device* dev) +{ + uint32_t *temp; + al_phys_addr_t temp_phys_addr; + int cpu; + + for_each_possible_cpu(cpu) { + temp = per_cpu(temp_percpu, cpu); + temp_phys_addr = per_cpu(temp_phys_addr_percpu, cpu); + + /* if not set, don't free */ + if (!temp) + continue; + + dma_free_coherent(dev, + sizeof(uint32_t), + temp, + temp_phys_addr); + + } +} + +/****************************************************************************** + *****************************************************************************/ +int al_dma_fast_init( + struct al_dma_device *device, + void __iomem *iobase_udma) +{ + int32_t rc; + int i; + + struct dma_device *dma = &device->common; + struct al_udma_m2s_pkt_len_conf pkt_len_conf; + struct al_udma *tx_udma; + + struct al_udma_q_params tx_params; + struct al_udma_q_params rx_params; + + int max_channels = al_dma_get_max_channels(); + + dev_dbg( + dma->dev, + "%s(%p, %p)\n", + __func__, + device, + iobase_udma); + + al_assert(FAST_DMA_NUM_OF_QUEUES >= NR_CPUS); + + device->max_channels = max_channels; + + device->udma_regs_base = iobase_udma; + device->app_regs_base = NULL; + + memset(&device->ssm_dma_params, 0, sizeof(struct al_ssm_dma_params)); + device->ssm_dma_params.dev_id = device->dev_id; + device->ssm_dma_params.rev_id = device->rev_id; + device->ssm_dma_params.udma_regs_base = device->udma_regs_base; + + device->ssm_dma_params.name = + kmalloc(strlen(dev_name(device->common.dev)) + 1, GFP_KERNEL); + if (device->ssm_dma_params.name == NULL) { + dev_err(device->common.dev, "kmalloc failed\n"); + return -1; + } + + memcpy( + device->ssm_dma_params.name, + dev_name(device->common.dev), + strlen(dev_name(device->common.dev)) + 1); + + device->ssm_dma_params.num_of_queues = max_channels; + + rc = al_ssm_dma_init(&device->hal_raid, &device->ssm_dma_params); + if (rc) { + dev_err(device->common.dev, "al_raid_dma_init failed\n"); + return rc; + } + + rc = al_ssm_dma_handle_get( + &device->hal_raid, + UDMA_TX, + &tx_udma); + if (rc) { + 
dev_err(device->common.dev, "al_raid_dma_handle_get failed\n"); + return rc; + } + + /* set max packet size to 128 (XOR with 32 sources) */ + /* TODO reduce max pkt size to 32 */ + pkt_len_conf.encode_64k_as_zero = AL_FALSE; + pkt_len_conf.max_pkt_size = SZ_128; + rc = al_udma_m2s_packet_size_cfg_set(tx_udma, &pkt_len_conf); + if (rc) { + dev_err(device->common.dev, + "al_udma_m2s_packet_size_cfg_set failed\n"); + return rc; + } + + /* enable RAID DMA engine */ + rc = al_ssm_dma_state_set(&device->hal_raid, UDMA_NORMAL); + + dma->dev = &device->pdev->dev; + + /* Init dma queue using the params below */ + for (i = 0; i < FAST_DMA_NUM_OF_QUEUES; i++) { + /* Allocate dma queue memory */ + /* allocate coherent memory for Tx submission descriptors */ + tx_dma_desc_virt[i] = dma_alloc_coherent( + dma->dev, + FAST_DMA_DESCS_COUNT * sizeof(union al_udma_desc), + &tx_dma_desc_phys[i], + GFP_KERNEL); + + /* allocate coherent memory for Rx submission descriptors */ + rx_dma_desc_virt[i] = dma_alloc_coherent( + dma->dev, + FAST_DMA_DESCS_COUNT * sizeof(union al_udma_desc), + &rx_dma_desc_phys[i], + GFP_KERNEL); + + /* Allocate memory for Rx completion descriptors */ + /* allocate coherent memory for Rx submission descriptors */ + rx_dma_cdesc_virt[i] = dma_alloc_coherent( + dma->dev, + FAST_DMA_RX_CDESCS_COUNT * sizeof(union al_udma_cdesc), + &rx_dma_cdesc_phys[i], + GFP_KERNEL); + + /* Fill in dma queue params */ + tx_params.size = FAST_DMA_DESCS_COUNT; + tx_params.desc_base = tx_dma_desc_virt[i]; + tx_params.desc_phy_base = tx_dma_desc_phys[i]; + tx_params.cdesc_base = NULL; /* don't use Tx completion ring */ + tx_params.cdesc_phy_base = 0; + tx_params.cdesc_size = FAST_DMA_TX_CDESCS_COUNT; + + rx_params.size = FAST_DMA_DESCS_COUNT; + rx_params.desc_base = rx_dma_desc_virt[i]; + rx_params.desc_phy_base = rx_dma_desc_phys[i]; + rx_params.cdesc_base = rx_dma_cdesc_virt[i]; + rx_params.cdesc_phy_base = rx_dma_cdesc_phys[i]; + rx_params.cdesc_size = FAST_DMA_RX_CDESCS_COUNT; + + rc += al_ssm_dma_q_init(&device->hal_raid, i, + &tx_params, &rx_params, AL_MEM_CRC_MEMCPY_Q); + } + + ssm_udma_fast_init(&device->hal_raid, dma->dev); + + fast_dma_init = true; + + return rc; +} +int al_dma_fast_terminate(struct al_dma_device *device) +{ + int i; + struct dma_device *dma = &device->common; + + dev_dbg( + dma->dev, + "%s(%p)\n", + __func__, + device); + + fast_dma_init = false; + + ssm_udma_fast_terminate(dma->dev); + + for (i=0; i < FAST_DMA_NUM_OF_QUEUES; i++) { + dma_free_coherent( + dma->dev, + FAST_DMA_RX_CDESCS_COUNT * sizeof(union al_udma_cdesc), + rx_dma_cdesc_virt[i], + rx_dma_cdesc_phys[i]); + + dma_free_coherent( + dma->dev, + FAST_DMA_DESCS_COUNT * sizeof(union al_udma_desc), + rx_dma_desc_virt[i], + rx_dma_desc_phys[i]); + + dma_free_coherent( + dma->dev, + FAST_DMA_DESCS_COUNT * sizeof(union al_udma_desc), + tx_dma_desc_virt[i], + tx_dma_desc_phys[i]); + } + + kfree(device->ssm_dma_params.name); + + return 0; +} +/****************************************************************************** + *****************************************************************************/ +/* Fast memcopy submission */ +int udma_fast_memcpy(int len, al_phys_addr_t src, al_phys_addr_t dst) +{ + struct al_udma_q *tx_udma_q, *rx_udma_q; + + union al_udma_desc *tx_desc; + union al_udma_desc *rx_desc; + int completed = 0; + int timeout = FAST_DMA_MEMCPY_TIMEOUT; + uint32_t flags; + /* prepare rx desc */ + + rx_udma_q = __this_cpu_read(rx_udma_q_percpu); + tx_udma_q = __this_cpu_read(tx_udma_q_percpu); + + rx_desc = 
al_udma_desc_get(rx_udma_q); + + flags = al_udma_ring_id_get(rx_udma_q) << + AL_M2S_DESC_RING_ID_SHIFT; + + al_udma_fast_desc_flags_set(rx_desc, flags, AL_M2S_DESC_RING_ID_MASK); + al_udma_fast_desc_len_set(rx_desc, len); + al_udma_fast_desc_buf_set(rx_desc, dst, 0); + + /* submit rx desc */ + al_udma_desc_action_add(rx_udma_q, 1); + + /* prepare tx desc */ + tx_desc = al_udma_desc_get(tx_udma_q); + + flags = al_udma_ring_id_get(tx_udma_q) << + AL_M2S_DESC_RING_ID_SHIFT; + + al_udma_fast_desc_flags_set(tx_desc, flags, AL_M2S_DESC_RING_ID_MASK); + al_udma_fast_desc_len_set(tx_desc, len); + al_udma_fast_desc_buf_set(tx_desc, src, 0); + + /* submit tx desc */ + al_udma_desc_action_add(tx_udma_q, 1); + + /* wait for completion using polling */ + while(1) { + completed = al_udma_fast_completion(rx_udma_q, 1, 0); + if ((completed > 0) || (timeout == 0)) + break; + + udelay(1); + timeout--; + } + + if (timeout == 0) { + pr_err("%s: Didn't receive completion in %d uSec", + __func__, FAST_DMA_MEMCPY_TIMEOUT); + + return -ETIME; + } + + return 0; +} +EXPORT_SYMBOL(udma_fast_memcpy); + +static inline al_phys_addr_t virt_to_physical_address(const volatile void __iomem *address) +{ + al_phys_addr_t phys_addr; + uint32_t phys_addr_h, phys_addr_l; + + /* + * write a virt. address to ATS1CPR: + * perform H/W stage1 address translation (meaning, to IPA) + * translate as current security state, privileged read accesses + * read PAR: (physical address register) + * lower 12-bit have some flags, the rest holds upper bits + * of the physical address + */ + asm volatile( "mcr p15, 0, %0, c7, c8, 0" :: "r"(address)); + + /* + * according to ARM ABI, in Little Endian systems r0 will contain the + * low 32 bits, while in Big Endian systems r0 will contain the high 32 + * bits + * TODO: assumes LE need to change to BE mode + */ + +#ifdef CONFIG_CPU_BIG_ENDIAN +#error "virt_to_physical_address assumes LE!" +#endif + asm volatile("mrrc p15, 0, %0, %1, c7" : "=r"(phys_addr_l), "=r"(phys_addr_h)); + + /* Take the lower 12-bit from the virtual address. 
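PAR only reports the physical page frame, so the offset within the 4 KiB page is taken from the original virtual address below.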
*/ + phys_addr = phys_addr_l & ~(((uint32_t)1<<12) - 1UL); + phys_addr |= (uintptr_t)address & AL_BIT_MASK(12); + + return phys_addr; +} + +#ifdef CONFIG_AL_PCIE_DEADLOCK_WA_VALIDATE +#define _al_dma_dma_read_validate(type, val) \ +{ \ + type _cpu_val; \ + switch (sizeof(type)) { \ + case sizeof(uint8_t): \ + _cpu_val = __raw_readb(address); \ + break; \ + case sizeof(uint16_t): \ + _cpu_val = le16_to_cpu((__force __le16)__raw_readw(address)); \ + break; \ + default: \ + case sizeof(uint32_t): \ + _cpu_val = le32_to_cpu((__force __le32)__raw_readl(address)); \ + break; \ + } \ + \ + if (memcmp(&_cpu_val, &val, sizeof(type))) { \ + al_info("[%s] Potential Error: DMA read value isn't the same as CPU read addr: " \ + "%p phys addr %x DMA read: %x cpu read: %x\n" \ + "This register might be clear on read or a status register, so different values " \ + "don't guarantee we have a problem. Please check the spec\n", \ + __func__, address, phys_addr, val, _cpu_val); \ + val = _cpu_val; \ + } \ +} +#else +#define _al_dma_dma_read_validate(type, val) +#endif + +static inline uint32_t _al_dma_read_reg(const volatile void __iomem *address, int size) +{ + unsigned long flags; + al_phys_addr_t phys_addr; + uint32_t val_32; + uint16_t val_16; + uint8_t val_8; + int i; + + /* Use DMA read only if the fast DMA was initialized and HW CC */ + if (likely((al_fabric_hwcc_enabled()) && (fast_dma_init))) { + local_irq_save(flags); + + phys_addr = virt_to_physical_address(address); + + for (i = 0; i < AL_SB_PCIE_NUM; i++) { + if (likely(al_pcie_address_valid[i] == false)) + continue; + + if (unlikely(phys_addr >= al_pcie_read_addr_start[i] && + phys_addr <= al_pcie_read_addr_end[i])) + goto pcie_mem_read; + } + + local_irq_restore(flags); + } + + + switch (size) { + case sizeof(uint8_t): + val_8 = __raw_readb(address); + return val_8; + case sizeof(uint16_t): + val_16 = le16_to_cpu((__force __le16)__raw_readw(address)); + return val_16; + default: + case sizeof(uint32_t): + val_32 = le32_to_cpu((__force __le32)__raw_readl(address)); + return val_32; + } + +pcie_mem_read: + udma_fast_memcpy(size, + phys_addr, + __this_cpu_read(temp_phys_addr_percpu)); + + switch (size) { + default: + case sizeof(uint32_t): + val_32 = *__this_cpu_read(temp_percpu); + _al_dma_dma_read_validate(uint32_t, val_32); + local_irq_restore(flags); + return val_32; + case sizeof(uint16_t): + val_16 = *__this_cpu_read(temp_percpu); + _al_dma_dma_read_validate(uint16_t, val_16); + local_irq_restore(flags); + return val_16; + case sizeof(uint8_t): + val_8 = *__this_cpu_read(temp_percpu); + _al_dma_dma_read_validate(uint8_t, val_8); + local_irq_restore(flags); + return val_8; + } +} + +uint32_t al_dma_read_reg32(const volatile void __iomem *address) +{ + return _al_dma_read_reg(address, sizeof(uint32_t)); +} +EXPORT_SYMBOL(al_dma_read_reg32); + +uint16_t al_dma_read_reg16(const volatile void __iomem *address) +{ + return _al_dma_read_reg(address, sizeof(uint16_t)); +} +EXPORT_SYMBOL(al_dma_read_reg16); + +uint8_t al_dma_read_reg8(const volatile void __iomem *address) +{ + return _al_dma_read_reg(address, sizeof(uint8_t)); +} +EXPORT_SYMBOL(al_dma_read_reg8); + + +void al_dma_write_reg32(volatile void __iomem *address, u32 val) +{ + unsigned long flags; + al_phys_addr_t phys_addr; + int i; + + /* Use DMA write only if the fast DMA was initialized and HW CC */ + if (likely((al_fabric_hwcc_enabled()) && (fast_dma_init))) { + local_irq_save(flags); + + phys_addr = virt_to_physical_address(address); + + for (i = 0; i < AL_SB_PCIE_NUM; i++) { + if
(likely(al_pcie_address_valid[i] == false)) + continue; + + if (unlikely(phys_addr >= al_pcie_write_addr_start[i] && + phys_addr <= al_pcie_write_addr_end[i])) + goto pcie_mem_write; + } + + local_irq_restore(flags); + } + + __raw_writel((__force u32) cpu_to_le32(val), address); + + return; + +pcie_mem_write: + __this_cpu_write(temp_percpu,&val); + + udma_fast_memcpy(sizeof(uint32_t), + __this_cpu_read(temp_phys_addr_percpu), + phys_addr); + + local_irq_restore(flags); +} +EXPORT_SYMBOL(al_dma_write_reg32); + +/****************************************************************************** + *****************************************************************************/ +int al_dma_core_terminate( + struct al_dma_device *device) +{ + int status = 0; + + struct dma_device *dma = &device->common; + + dev_dbg( + dma->dev, + "%s(%p)\n", + __func__, + device); + + dma_async_device_unregister(&device->common); + + al_dma_free_interrupts(device); + + kfree(device->ssm_dma_params.name); + + kmem_cache_destroy(device->cache); + + return status; +} + +/****************************************************************************** + *****************************************************************************/ +static int al_dma_init_channels(struct al_dma_device *device, int max_channels) +{ + int i; + struct al_dma_chan *chan; + struct device *dev = &device->pdev->dev; + struct dma_device *dma = &device->common; + + INIT_LIST_HEAD(&dma->channels); + dma->chancnt = max_channels; + + if (dma->chancnt > ARRAY_SIZE(device->channels)) { + dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", + dma->chancnt, ARRAY_SIZE(device->channels)); + dma->chancnt = ARRAY_SIZE(device->channels); + } + + for (i = 0; i < dma->chancnt; i++) { + chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL); + if (!chan) + break; + + al_dma_init_channel(device, chan, i); + + } + dma->chancnt = i; + return i; +} + +/****************************************************************************** + *****************************************************************************/ +static void al_dma_init_channel(struct al_dma_device *device, + struct al_dma_chan *chan, int idx) +{ + struct dma_device *dma = &device->common; + struct dma_chan *c = &chan->common; + unsigned long data = (unsigned long) c; + + dev_dbg( + dma->dev, + "%s(%p, %p, %d): %p\n", + __func__, + device, + chan, + idx, + c); + + chan->device = device; + chan->idx = idx; + chan->hal_raid = &device->hal_raid; + + spin_lock_init(&chan->prep_lock); + + spin_lock_init(&chan->cleanup_lock); + chan->common.device = dma; + list_add_tail(&chan->common.device_node, &dma->channels); + device->channels[idx] = chan; + + tasklet_init(&chan->cleanup_task, al_dma_cleanup_tasklet, data); +} + +/****************************************************************************** + *****************************************************************************/ +static int al_dma_setup_interrupts(struct al_dma_device *device) +{ + struct al_dma_chan *chan; + struct pci_dev *pdev = device->pdev; + struct device *dev = &pdev->dev; + struct msix_entry *msix; + int i, j, msixcnt; + int err = -EINVAL; + + /* The number of MSI-X vectors should equal the number of channels */ + msixcnt = device->common.chancnt; + + for (i = 0; i < msixcnt; i++) + device->msix_entries[i].entry = 3 + i; + + err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt); + + if (err < 0) { + dev_err(dev, "pci_enable_msix_exact failed! 
using intx instead.\n"); + goto intx; + } + + if (err > 0) { + dev_err(dev, "pci_enable_msix_exact failed! msix_single_vector.\n"); + goto msix_single_vector; + } + + for (i = 0; i < msixcnt; i++) { + msix = &device->msix_entries[i]; + + chan = al_dma_chan_by_index(device, i); + + dev_dbg(dev, "%s: requesting irq %d\n", __func__, msix->vector); + + snprintf(device->irq_tbl[i].name, AL_DMA_IRQNAME_SIZE, + "al-dma-comp-%d@pci:%s", i, + pci_name(pdev)); + + err = devm_request_irq( + dev, + msix->vector, + al_dma_do_interrupt_msix, + 0, + device->irq_tbl[i].name, + chan); + + if (err) { + dev_err(dev, "devm_request_irq failed!.\n"); + + for (j = 0; j < i; j++) { + msix = &device->msix_entries[j]; + chan = al_dma_chan_by_index(device, j); + devm_free_irq(dev, msix->vector, chan); + } + + /* goto msix_single_vector; */ + return -EIO; + } + + /* setup interrupt affinity */ + if (cpu_online(chan->idx)) + cpumask_set_cpu(chan->idx, &chan->affinity_mask); + else + cpumask_copy(&chan->affinity_mask, cpu_online_mask); + + dev_dbg( + dev, + "Setting affinity of channel %d to %lx\n", + chan->idx, + chan->affinity_mask.bits[0]); + + err = irq_set_affinity_hint(msix->vector, &chan->affinity_mask); + if (err) { + dev_err(dev, "irq_set_affinity_hint failed!\n"); + return err; + } + + err = irq_set_affinity(msix->vector, &chan->affinity_mask); + if (err) { + dev_err(dev, "irq_set_affinity failed!\n"); + return err; + } + } + + err = al_udma_iofic_config( + (struct unit_regs *)device->udma_regs_base, + AL_IOFIC_MODE_MSIX_PER_Q, 0x480, 0x480, 0x1E0, 0x1E0); + if (err) { + dev_err(dev, "al_udma_iofic_config failed!.\n"); + return err; + } + + al_udma_iofic_unmask( + (struct unit_regs *)device->udma_regs_base, + AL_UDMA_IOFIC_LEVEL_PRIMARY, + AL_INT_GROUP_B, + ((1 << (device->common.chancnt)) - 1)); + + goto done; + +msix_single_vector: + msix = &device->msix_entries[0]; + + msix->entry = 0; + + err = pci_enable_msix_exact(pdev, device->msix_entries, 1); + + if (err) + goto intx; + + snprintf(device->irq_tbl[0].name, AL_DMA_IRQNAME_SIZE, + "al-dma-msix-all@pci:%s", pci_name(pdev)); + + err = devm_request_irq( + dev, + msix->vector, + al_dma_do_interrupt, + IRQF_TRIGGER_RISING, + device->irq_tbl[0].name, device); + + if (err) { + pci_disable_msix(pdev); + goto intx; + } + + goto done; + +intx: + snprintf(device->irq_tbl[0].name, AL_DMA_IRQNAME_SIZE, + "al-dma-intx-all@pci:%s", pci_name(pdev)); + + err = devm_request_irq(dev, pdev->irq, al_dma_do_interrupt, + IRQF_SHARED, device->irq_tbl[0].name, device); + if (err) + goto err_no_irq; + +done: + return 0; + +err_no_irq: + /* Disable all interrupt generation */ + + dev_err(dev, "no usable interrupts\n"); + return err; +} + +/****************************************************************************** + *****************************************************************************/ +static void al_dma_free_interrupts(struct al_dma_device *device) +{ + struct al_dma_chan *chan; + struct pci_dev *pdev = device->pdev; + struct device *dev = &pdev->dev; + struct msix_entry *msix; + int i, msixcnt; + + /* The number of MSI-X vectors should equal the number of channels */ + msixcnt = device->common.chancnt; + + if (pdev->msix_enabled) { + msix = &device->msix_entries[0]; + if (msix->entry == 0) { + devm_free_irq(dev, msix->vector, device); + pci_disable_msix(pdev); + return; + } + + for (i = 0; i < msixcnt; i++) { + msix = &device->msix_entries[i]; + chan = al_dma_chan_by_index(device, i); + irq_set_affinity_hint(msix->vector, NULL); + devm_free_irq(dev, msix->vector, 
chan); + } + + pci_disable_msix(pdev); + } else { + devm_free_irq(dev, pdev->irq, device); + } +} + +/****************************************************************************** + *****************************************************************************/ +/* al_dma_alloc_chan_resources - allocate/initialize tx and rx descriptor rings + */ +static int al_dma_alloc_chan_resources(struct dma_chan *c) +{ + struct al_dma_chan *chan = to_al_dma_chan(c); + struct device *dev = chan->device->common.dev; + struct al_dma_sw_desc **sw_ring; + struct al_udma_q_params tx_params; + struct al_udma_q_params rx_params; + int ring_alloc_order = al_dma_get_ring_alloc_order(); + int tx_descs_order = al_dma_get_tx_descs_order(); + int rx_descs_order = al_dma_get_rx_descs_order(); + uint32_t rc = 0; + + dev_dbg(dev, "al_dma_alloc_chan_resources: channel %d\n", + chan->idx); + + /* have we already been set up? */ + if (chan->sw_ring) + return 1 << chan->alloc_order; + + chan->tx_descs_num = 1 << tx_descs_order; + chan->rx_descs_num = 1 << rx_descs_order; + + /* allocate coherent memory for Tx submission descriptors */ + chan->tx_dma_desc_virt = dma_alloc_coherent(dev, + chan->tx_descs_num * + sizeof(union al_udma_desc), + &chan->tx_dma_desc, + GFP_KERNEL); + if (chan->tx_dma_desc_virt == NULL) { + dev_err(dev, "failed to allocate %zu bytes of coherent " + "memory for Tx submission descriptors\n", + chan->tx_descs_num * sizeof(union al_udma_desc)); + return -ENOMEM; + } + dev_dbg(dev, "allocated tx descriptor ring: virt 0x%p phys 0x%llx\n", + chan->tx_dma_desc_virt, (u64)chan->tx_dma_desc); + + /* allocate coherent memory for Rx submission descriptors */ + chan->rx_dma_desc_virt = dma_alloc_coherent(dev, + chan->rx_descs_num * + sizeof(union al_udma_desc), + &chan->rx_dma_desc, + GFP_KERNEL); + if (chan->rx_dma_desc_virt == NULL) { + dev_err(dev, "failed to allocate %zu bytes of coherent " + "memory for Rx submission descriptors\n", + chan->rx_descs_num * sizeof(union al_udma_desc)); + + al_dma_free_chan_resources(c); + return -ENOMEM; + } + dev_dbg(dev, "allocated rx descriptor ring: virt 0x%p phys 0x%llx\n", + chan->rx_dma_desc_virt, (u64)chan->rx_dma_desc); + + /* allocate coherent memory for Rx completion descriptors */ + chan->rx_dma_cdesc_virt = dma_alloc_coherent(dev, + chan->rx_descs_num * + AL_DMA_RAID_RX_CDESC_SIZE, + &chan->rx_dma_cdesc, + GFP_KERNEL); + if (chan->rx_dma_cdesc_virt == NULL) { + dev_err(dev, "failed to allocate %d bytes of coherent " + "memory for Rx completion descriptors\n", + chan->rx_descs_num * AL_DMA_RAID_RX_CDESC_SIZE); + + al_dma_free_chan_resources(c); + return -ENOMEM; + } + + /* clear the Rx completion descriptors to avoid false positive */ + memset( + chan->rx_dma_cdesc_virt, + 0, + chan->rx_descs_num * AL_DMA_RAID_RX_CDESC_SIZE); + + dev_dbg( + dev, + "allocated rx completion desc ring: virt 0x%p phys 0x%llx\n", + chan->rx_dma_cdesc_virt, (u64)chan->rx_dma_cdesc); + + tx_params.size = chan->tx_descs_num; + tx_params.desc_base = chan->tx_dma_desc_virt; + tx_params.desc_phy_base = chan->tx_dma_desc; + tx_params.cdesc_base = NULL; /* don't use Tx completion ring */ + tx_params.cdesc_phy_base = 0; + tx_params.cdesc_size = AL_DMA_RAID_TX_CDESC_SIZE; /* size is needed */ + + rx_params.size = chan->rx_descs_num; + rx_params.desc_base = chan->rx_dma_desc_virt; + rx_params.desc_phy_base = chan->rx_dma_desc; + rx_params.cdesc_base = chan->rx_dma_cdesc_virt; + rx_params.cdesc_phy_base = chan->rx_dma_cdesc; + rx_params.cdesc_size = AL_DMA_RAID_RX_CDESC_SIZE; + + /* alloc sw
descriptors */ + if (ring_alloc_order < AL_DMA_SW_RING_MIN_ORDER) { + dev_err( + dev, + "%s: ring_alloc_order = %d < %d!\n", + __func__, + ring_alloc_order, + AL_DMA_SW_RING_MIN_ORDER); + + al_dma_free_chan_resources(c); + return -EINVAL; + } else if (ring_alloc_order > AL_DMA_SW_RING_MAX_ORDER) { + dev_err( + dev, + "%s: ring_alloc_order = %d > %d!\n", + __func__, + ring_alloc_order, + AL_DMA_SW_RING_MAX_ORDER); + + al_dma_free_chan_resources(c); + return -EINVAL; + } else if (ring_alloc_order > rx_descs_order) { + dev_warn( + dev, + "%s: ring_alloc_order > rx_descs_order (%d>%d)!\n", + __func__, + ring_alloc_order, + rx_descs_order); + + } + + sw_ring = al_dma_alloc_sw_ring(chan, ring_alloc_order, GFP_KERNEL); + if (!sw_ring) + return -ENOMEM; + + spin_lock_bh(&chan->cleanup_lock); + spin_lock_bh(&chan->prep_lock); + chan->sw_ring = sw_ring; + chan->head = 0; + chan->tail = 0; + chan->alloc_order = ring_alloc_order; + chan->tx_desc_produced = 0; + spin_unlock_bh(&chan->prep_lock); + spin_unlock_bh(&chan->cleanup_lock); + + rc = al_ssm_dma_q_init(&chan->device->hal_raid, chan->idx, + &tx_params, &rx_params, AL_RAID_Q); + if (rc) { + dev_err(dev, "failed to initialize hal q %d. rc %d\n", + chan->idx, rc); + al_dma_free_chan_resources(c); + return rc; + } + + /* should we return less? */ + return 1 << chan->alloc_order; +} + +/****************************************************************************** + *****************************************************************************/ +/* al_dma_free_chan_resources - free tx and rx descriptor rings + * @chan: channel to be freed + */ +static void al_dma_free_chan_resources(struct dma_chan *c) +{ + struct al_dma_chan *chan = to_al_dma_chan(c); + struct device *dev = chan->device->common.dev; + struct al_dma_sw_desc **sw_ring; + int i; + + dev_dbg(dev, "%s(%p): %p\n", __func__, c, chan); + + tasklet_disable(&chan->cleanup_task); + + al_dma_cleanup_fn(chan, 0); + + spin_lock_bh(&chan->cleanup_lock); + sw_ring = chan->sw_ring; + for (i = 0; i < (1 << chan->alloc_order); i++) + al_dma_free_ring_ent(sw_ring[i], chan); + + kfree(chan->sw_ring); + + spin_unlock_bh(&chan->cleanup_lock); + if (chan->tx_dma_desc_virt != NULL) { + dma_free_coherent( + dev, + chan->tx_descs_num * sizeof(union al_udma_desc), + chan->tx_dma_desc_virt, chan->tx_dma_desc); + chan->tx_dma_desc_virt = NULL; + } + + if (chan->rx_dma_desc_virt != NULL) { + dma_free_coherent( + dev, + chan->rx_descs_num * sizeof(union al_udma_desc), + chan->rx_dma_desc_virt, + chan->rx_dma_desc); + chan->rx_dma_desc_virt = NULL; + } + + if (chan->rx_dma_cdesc_virt != NULL) { + dma_free_coherent(dev, chan->rx_descs_num * + AL_DMA_RAID_RX_CDESC_SIZE, + chan->rx_dma_cdesc_virt, chan->rx_dma_cdesc); + chan->rx_dma_cdesc_virt = NULL; + } + + return; +} + +/****************************************************************************** + *****************************************************************************/ +static struct al_dma_sw_desc **al_dma_alloc_sw_ring( + struct al_dma_chan *chan, + int order, + gfp_t flags) +{ + struct al_dma_sw_desc **ring; + int descs = 1 << order; + int i; + + /* allocate the array to hold the software ring */ + ring = kcalloc(descs, sizeof(*ring), flags); + if (!ring) + return NULL; + for (i = 0; i < descs; i++) { + ring[i] = al_dma_alloc_ring_ent(chan, flags); + if (!ring[i]) { + while (i--) + al_dma_free_ring_ent(ring[i], chan); + kfree(ring); + return NULL; + } + set_desc_id(ring[i], i); + } + + return ring; +} +
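+/* note: the sw ring length is a power of two, so al_dma_get_ring_ent() wraps indices with a mask instead of a modulo; e.g. with alloc_order 10 (1024 entries) index 1025 maps to slot 1025 & 1023 = 1 */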
+/****************************************************************************** + *****************************************************************************/ +static struct al_dma_sw_desc *al_dma_alloc_ring_ent( + struct al_dma_chan *chan, + gfp_t flags) +{ + struct al_dma_sw_desc *desc; + + desc = kmem_cache_zalloc(chan->device->cache, flags); + if (!desc) + return NULL; + + dma_async_tx_descriptor_init(&desc->txd, &chan->common); + desc->txd.tx_submit = al_dma_tx_submit_unlock; + return desc; +} + +/****************************************************************************** + *****************************************************************************/ +static void al_dma_free_ring_ent( + struct al_dma_sw_desc *desc, + struct al_dma_chan *chan) +{ + kmem_cache_free(chan->device->cache, desc); +} + +/* wrappers for accessing PCI configuration space */ +static int al_dma_read_pcie_config(void *handle, int where, uint32_t *val) +{ + /* handle is a pointer to the pci_dev */ + pci_read_config_dword((struct pci_dev *)handle, where, val); + return 0; +} + +static int al_dma_write_pcie_config(void *handle, int where, uint32_t val) +{ + /* handle is a pointer to the pci_dev */ + pci_write_config_dword((struct pci_dev *)handle, where, val); + return 0; +} + +/* wrapper for PCI function level reset */ +static int al_dma_write_pcie_flr(void *handle) +{ + /* handle is a pointer to the pci_dev */ + __pci_reset_function_locked((struct pci_dev *)handle); + udelay(1000); + return 0; +} + +/** + * al_dma_flr - perform Function Level Reset + * @pdev: PCI device to reset + */ +void al_dma_flr(struct pci_dev *pdev) +{ + al_pcie_perform_flr(al_dma_read_pcie_config, + al_dma_write_pcie_config, + al_dma_write_pcie_flr, + pdev); +} + +/****************************************************************************** + *****************************************************************************/ +/** + * al_dma_get_sw_desc_lock - get sw desc and grab ring producer lock + * @chan: dma channel to operate on + * @num: the number of required sw descriptors + */ +int al_dma_get_sw_desc_lock(struct al_dma_chan *chan, int num) +{ + spin_lock_bh(&chan->prep_lock); + + /* never allow the last descriptor to be consumed, we need at + * least one free at all times to allow for on-the-fly ring + * resizing.
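+ * Free space is computed with CIRC_SPACE() over head/tail, so the reserved slot also keeps the full and empty ring states distinguishable.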
+ */ + if (likely(al_dma_ring_space(chan) >= num)) { + dev_dbg(to_dev(chan), "%s: (%x:%x)\n", + __func__, chan->head, chan->tail); + return 0; /* with chan->prep_lock held */ + } + + spin_unlock_bh(&chan->prep_lock); + + return -ENOMEM; +} + +/****************************************************************************** + *****************************************************************************/ +/** + * al_dma_do_interrupt - handler used for single vector interrupt mode + * @irq: interrupt id + * @data: interrupt data + */ +static irqreturn_t al_dma_do_interrupt(int irq, void *data) +{ + pr_debug("%s(%d, %p)\n", __func__, irq, data); + + /* TODO: handle interrupt registers */ + + return IRQ_HANDLED; +} + +/****************************************************************************** + *****************************************************************************/ +/** + * al_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode + * @irq: interrupt id + * @data: interrupt data + */ +static irqreturn_t al_dma_do_interrupt_msix(int irq, void *data) +{ + struct al_dma_chan *chan = data; + + pr_debug("%s(%d, %p)\n", __func__, irq, data); + + tasklet_schedule(&chan->cleanup_task); + + return IRQ_HANDLED; +} + +/****************************************************************************** + *****************************************************************************/ +/** + * al_dma_tx_status - poll the status of a DMA transaction + * @c: channel handle + * @cookie: transaction identifier + * @txstate: if set, updated with the transaction state + */ +static enum dma_status al_dma_tx_status( + struct dma_chan *c, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct al_dma_chan *chan = to_al_dma_chan(c); + enum dma_status ret; + + dev_dbg( + to_dev(chan), + "%s(%d)\n", + __func__, + cookie); + + ret = dma_cookie_status(c, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + al_dma_cleanup_fn(chan, 0); + + return dma_cookie_status(c, cookie, txstate); +} + +/****************************************************************************** + *****************************************************************************/ +static inline int al_dma_issue_pending_raw(struct al_dma_chan *chan) +{ + int err = 0; + + if (chan->tx_desc_produced) { + dev_dbg( + chan->device->common.dev, + "%s(%p): issuing %u descriptors\n", + __func__, + chan, + chan->tx_desc_produced); + + err = al_raid_dma_action( + chan->hal_raid, + chan->idx, + chan->tx_desc_produced); + if (err) + dev_err( + chan->device->common.dev, + "al_raid_dma_action failed\n"); + + chan->tx_desc_produced = 0; + } + + return err; +} + +/****************************************************************************** + *****************************************************************************/ +void al_dma_tx_submit_sw_cond_unlock( + struct al_dma_chan *chan, + struct dma_async_tx_descriptor *tx) +{ + if (tx) { + struct dma_chan *c = tx->chan; + dma_cookie_t cookie = dma_cookie_assign(tx); + + c->cookie = cookie; + + dev_dbg( + chan->device->common.dev, + "%s: cookie = %d\n", + __func__, + cookie); + + /** + * according to Documentation/circular-buffers.txt we should + * have smp_wmb before incrementing the head, however, the + * al_raid_dma_action contains writel() which implies dmb on + * ARM so this smp_wmb() can be omitted on ARM platforms + */ + /*smp_wmb();*/ /* commit the item before updating the head */ + chan->head += chan->sw_desc_num_locked; + /** + * in our case the consumer (interrupt
handler) will be woken up + * by the hw, so we send the transaction to the hw after + * incrementing the head + **/ + } + +#if !AL_DMA_ISSUE_PNDNG_UPON_SUBMIT + spin_unlock_bh(&chan->prep_lock); +#endif +} + +/****************************************************************************** + *****************************************************************************/ +static dma_cookie_t al_dma_tx_submit_unlock(struct dma_async_tx_descriptor *tx) +{ +#if AL_DMA_ISSUE_PNDNG_UPON_SUBMIT + int err; + + struct dma_chan *c = tx->chan; + struct al_dma_chan *chan = to_al_dma_chan(c); + + dev_dbg( + chan->device->common.dev, + "%s(%p): %p, %p\n", + __func__, + tx, + chan, + c); + + err = al_dma_issue_pending_raw(chan); + if (err) + dev_err( + chan->device->common.dev, + "%s: al_dma_issue_pending\n", + __func__); + + spin_unlock_bh(&chan->prep_lock); +#endif + + return tx->cookie; +} + +/****************************************************************************** + *****************************************************************************/ +static void al_dma_issue_pending(struct dma_chan *c) +{ +#if !AL_DMA_ISSUE_PNDNG_UPON_SUBMIT + int err; + + struct al_dma_chan *chan = to_al_dma_chan(c); + + spin_lock_bh(&chan->prep_lock); + + dev_dbg( + chan->device->common.dev, + "%s(%p)\n", + __func__, + chan); + + err = al_dma_issue_pending_raw(chan); + if (err) + dev_err( + chan->device->common.dev, + "%s: al_dma_issue_pending\n", + __func__); + + spin_unlock_bh(&chan->prep_lock); +#endif +} + +static int al_dma_device_terminate_all(struct dma_chan *dchan) +{ + struct al_dma_chan *chan = to_al_dma_chan(dchan); + int err; + dev_err( + chan->device->common.dev, + "%s: Unexpected cmd (terminate_all)!\n", + __func__); + err = -ENXIO; + + return err; + +} + +static int al_dma_device_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + + struct al_dma_chan *chan = to_al_dma_chan(dchan); + int err; + dev_err( + chan->device->common.dev, + "%s: Unexpected cmd (device_config)!\n", + __func__); + err = -ENXIO; + + return err; + +} + +static void al_dma_cleanup_tasklet(unsigned long data) +{ + struct al_dma_chan *chan = to_al_dma_chan((void *) data); + int num_completed; + + num_completed = al_dma_cleanup_fn(chan, 1); + + if (unlikely(num_completed < 0)) + dev_err( + chan->device->common.dev, + "al_dma_cleanup_fn failed\n"); + + al_udma_iofic_unmask( + (struct unit_regs *)chan->device->udma_regs_base, + AL_UDMA_IOFIC_LEVEL_PRIMARY, + AL_INT_GROUP_B, + 1 << chan->idx); +} + diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma_main.c b/target/linux/alpine/files/drivers/dma/al/al_dma_main.c new file mode 100644 index 00000000000000..d8c25e5460248d --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_dma_main.c @@ -0,0 +1,233 @@ +/* + * Annapurna Labs DMA Linux driver + * Copyright(c) 2011 Annapurna Labs. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include +#include +#include +#include + +#include "al_dma.h" +#include "al_dma_sysfs.h" + +MODULE_VERSION(AL_DMA_VERSION); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Annapurna Labs"); + +#define DRV_NAME "al_dma" + +enum { + /* BAR's are enumerated in terms of pci_resource_start() terms */ + AL_DMA_UDMA_BAR = 0, + AL_DMA_APP_BAR = 4, +}; + +static int al_dma_pci_probe( + struct pci_dev *pdev, + const struct pci_device_id *id); + +static void al_dma_pci_remove( + struct pci_dev *pdev); + +static void al_dma_pci_shutdown( + struct pci_dev *pdev); + +static const struct pci_device_id al_dma_pci_tbl[] = { + { PCI_VDEVICE(AMAZON_ANNAPURNA_LABS, PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_RAID_DMA) }, + { PCI_VDEVICE(AMAZON_ANNAPURNA_LABS, PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_RAID_DMA_VF) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, al_dma_pci_tbl); + +static struct pci_driver al_dma_pci_driver = { + .name = DRV_NAME, + .id_table = al_dma_pci_tbl, + .probe = al_dma_pci_probe, + .remove = al_dma_pci_remove, + .shutdown = al_dma_pci_shutdown, +}; + +/****************************************************************************** + *****************************************************************************/ +static int al_dma_pci_probe( + struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int status = 0; + + void __iomem * const *iomap; + struct device *dev = &pdev->dev; + struct al_dma_device *device; + int bar_reg; + u16 dev_id; + u8 rev_id; + + dev_dbg(dev, "%s(%p, %p)\n", __func__, pdev, id); + + pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id); + pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); + + /* Perform FLR on a related function */ + al_dma_flr(pdev); + + status = pcim_enable_device(pdev); + if (status) { + pr_err("%s: pcim_enable_device failed!\n", __func__); + goto done; + } + + bar_reg = pdev->is_physfn ? 
+ (1 << AL_DMA_UDMA_BAR) | (1 << AL_DMA_APP_BAR) : + (1 << AL_DMA_UDMA_BAR); + + status = pcim_iomap_regions( + pdev, + bar_reg, + DRV_NAME); + if (status) { + pr_err("%s: pcim_iomap_regions failed!\n", __func__); + goto done; + } + + iomap = pcim_iomap_table(pdev); + if (!iomap) { + status = -ENOMEM; + goto done; + } + + status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (status) + goto done; + + device = devm_kzalloc(dev, sizeof(struct al_dma_device), GFP_KERNEL); + if (!device) { + status = -ENOMEM; + goto done; + } + + device->pdev = pdev; + device->dev_id = dev_id; + device->rev_id = rev_id; + + pci_set_master(pdev); + pci_set_drvdata(pdev, device); + dev_set_drvdata(dev, device); + + device->common.dev = &pdev->dev; + +#ifdef CONFIG_AL_DMA_PCI_IOV + if (PCI_FUNC(pdev->devfn) == 0) { + status = pci_enable_sriov(pdev, 1); + if (status) { + dev_err(dev, "%s: pci_enable_sriov failed, status %d\n", + __func__, status); + } + } +#endif + + if (pdev->is_physfn) { + status = al_dma_core_init( + device, + iomap[AL_DMA_UDMA_BAR], + iomap[AL_DMA_APP_BAR]); + if (status) { + dev_err(dev, "%s: al_dma_core_init failed\n", __func__); + goto done; + } + + status = al_dma_sysfs_init(dev); + if (status) { + dev_err(dev, "%s: al_dma_sysfs_init failed\n", __func__); + goto err_sysfs_init; + } + } + else { + status = al_dma_fast_init( + device, + iomap[AL_DMA_UDMA_BAR]); + if (status) { + dev_err(dev, "%s: al_dma_fast_init failed\n", __func__); + goto done; + } + } + + goto done; + +err_sysfs_init: + al_dma_core_terminate(device); + +done: + return status; +} + +/****************************************************************************** + *****************************************************************************/ +static void al_dma_pci_remove(struct pci_dev *pdev) +{ + struct al_dma_device *device = pci_get_drvdata(pdev); + struct device *dev = &pdev->dev; + + if (!device) + return; + + dev_dbg(&pdev->dev, "Removing dma\n"); + + if (pdev->is_physfn) { + al_dma_sysfs_terminate(dev); + + al_dma_core_terminate(device); + } else { + al_dma_fast_terminate(device); + } + +} + +static void al_dma_pci_shutdown(struct pci_dev *pdev) +{ + /* Don't call for physfn as its removal is not fully implement yet */ + if (!pdev->is_physfn) + al_dma_pci_remove(pdev); +} + +/****************************************************************************** + *****************************************************************************/ +static int __init al_dma_init_module(void) +{ + int err; + + pr_info( + "%s: Annapurna Labs DMA Driver %s\n", + DRV_NAME, + AL_DMA_VERSION); + + err = pci_register_driver(&al_dma_pci_driver); + + return err; +} +module_init(al_dma_init_module); + +/****************************************************************************** + *****************************************************************************/ +static void __exit al_dma_exit_module(void) +{ + pci_unregister_driver(&al_dma_pci_driver); +} +module_exit(al_dma_exit_module); diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma_module_params.c b/target/linux/alpine/files/drivers/dma/al/al_dma_module_params.c new file mode 100644 index 00000000000000..18f94d817d0fb1 --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_dma_module_params.c @@ -0,0 +1,138 @@ +/* + * drivers/crypto/al/al_dma_module_params.c + * + * Annapurna Labs DMA driver - module params + * + * Copyright (C) 2013 Annapurna Labs Ltd. 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/module.h>
+#include "al_dma.h"
+
+static int op_support_interrupt = 1;
+module_param(op_support_interrupt, int, 0444);
+MODULE_PARM_DESC(op_support_interrupt,
+	"DMA_INTERRUPT capability (default: 1 - enabled)");
+
+static int op_support_memcpy = 1;
+module_param(op_support_memcpy, int, 0444);
+MODULE_PARM_DESC(op_support_memcpy,
+	"DMA_MEMCPY capability (default: 1 - enabled)");
+
+static int op_support_memset = 1;
+module_param(op_support_memset, int, 0444);
+MODULE_PARM_DESC(op_support_memset,
+	"DMA_MEMSET capability (default: 1 - enabled)");
+
+static int op_support_xor = 1;
+module_param(op_support_xor, int, 0444);
+MODULE_PARM_DESC(op_support_xor,
+	"DMA_XOR capability (default: 1 - enabled)");
+
+static int op_support_xor_val = 1;
+module_param(op_support_xor_val, int, 0444);
+MODULE_PARM_DESC(op_support_xor_val,
+	"DMA_XOR_VAL capability (default: 1 - enabled)");
+
+static int op_support_pq = 1;
+module_param(op_support_pq, int, 0444);
+MODULE_PARM_DESC(op_support_pq,
+	"DMA_PQ capability (default: 1 - enabled)");
+
+static int op_support_pq_val = 1;
+module_param(op_support_pq_val, int, 0444);
+MODULE_PARM_DESC(op_support_pq_val,
+	"DMA_PQ_VAL capability (default: 1 - enabled)");
+
+static int max_channels = AL_DMA_MAX_CHANNELS;
+module_param(max_channels, int, 0644);
+MODULE_PARM_DESC(
+	max_channels,
+	"maximum number of channels (queues) to enable (default: 4)");
+
+static int ring_alloc_order = 10;
+module_param(ring_alloc_order, int, 0644);
+MODULE_PARM_DESC(
+	ring_alloc_order,
+	"allocate 2^n descriptors per channel (default: 10, max: 16)");
+
+static int tx_descs_order = 15;
+module_param(tx_descs_order, int, 0644);
+MODULE_PARM_DESC(
+	tx_descs_order,
+	"allocate 2^n descriptors in Tx queue (default: 15)");
+
+static int rx_descs_order = 15;
+module_param(rx_descs_order, int, 0644);
+MODULE_PARM_DESC(
+	rx_descs_order,
+	"allocate 2^n descriptors in Rx queue (default: 15)");
+
+int al_dma_get_op_support_interrupt(void)
+{
+	return op_support_interrupt;
+}
+
+int al_dma_get_op_support_memcpy(void)
+{
+	return op_support_memcpy;
+}
+
+int al_dma_get_op_support_memset(void)
+{
+	return op_support_memset;
+}
+
+int al_dma_get_op_support_xor(void)
+{
+	return op_support_xor;
+}
+
+int al_dma_get_op_support_xor_val(void)
+{
+	return op_support_xor_val;
+}
+
+int al_dma_get_op_support_pq(void)
+{
+	return op_support_pq;
+}
+
+int al_dma_get_op_support_pq_val(void)
+{
+	return op_support_pq_val;
+}
+
+int al_dma_get_max_channels(void)
+{
+	return max_channels;
+}
+
+int al_dma_get_ring_alloc_order(void)
+{
+	return ring_alloc_order;
+}
+
+int al_dma_get_tx_descs_order(void)
+{
+	return tx_descs_order;
+}
+
+int al_dma_get_rx_descs_order(void)
+{
+	return rx_descs_order;
+}
diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma_module_params.h b/target/linux/alpine/files/drivers/dma/al/al_dma_module_params.h
new file mode 100644
index 00000000000000..2a44a7aaa95652
--- /dev/null
+++ b/target/linux/alpine/files/drivers/dma/al/al_dma_module_params.h
@@ -0,0 +1,48 @@
+/*
+ * drivers/dma/al/al_dma_module_params.h
+ *
+ * Annapurna Labs DMA driver - module params
+ *
+ * Copyright (C) 2015 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __AL_DMA_MODULE_PARAMS_H__
+#define __AL_DMA_MODULE_PARAMS_H__
+
+int al_dma_get_op_support_interrupt(void);
+
+int al_dma_get_op_support_memcpy(void);
+
+int al_dma_get_op_support_memset(void);
+
+int al_dma_get_op_support_xor(void);
+
+int al_dma_get_op_support_xor_val(void);
+
+int al_dma_get_op_support_pq(void);
+
+int al_dma_get_op_support_pq_val(void);
+
+int al_dma_get_max_channels(void);
+
+int al_dma_get_ring_alloc_order(void);
+
+int al_dma_get_tx_descs_order(void);
+
+int al_dma_get_rx_descs_order(void);
+
+#endif /* __AL_DMA_MODULE_PARAMS_H__ */
diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma_prep.h b/target/linux/alpine/files/drivers/dma/al/al_dma_prep.h
new file mode 100644
index 00000000000000..61ff7eb264e022
--- /dev/null
+++ b/target/linux/alpine/files/drivers/dma/al/al_dma_prep.h
@@ -0,0 +1,82 @@
+/*
+ * Annapurna Labs DMA Linux driver - operation preparation declarations
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
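The accessors declared above are how the rest of the driver reads these knobs. As a rough illustration of how the 2^n order parameters would translate into ring sizes (a sketch, not driver code; al_dma_ring_entries() is a hypothetical helper, and the clamp follows the "max: 16" bound stated in the parameter description):

#include <linux/kernel.h>
#include "al_dma_module_params.h"

/* Hypothetical helper: turn the ring_alloc_order module parameter into a
 * per-channel descriptor count, bounded to the documented maximum order. */
static inline unsigned int al_dma_ring_entries(void)
{
	int order = clamp(al_dma_get_ring_alloc_order(), 1, 16);

	return 1U << order;	/* 2^order descriptors per channel */
}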
+ * + */ + +#ifndef __AL_DMA_PREP_H__ +#define __AL_DMA_PREP_H__ + +#include "al_dma.h" + +struct dma_async_tx_descriptor *al_dma_prep_interrupt_lock( + struct dma_chan *c, + unsigned long flags); + +struct dma_async_tx_descriptor *al_dma_prep_memcpy_lock( + struct dma_chan *c, + dma_addr_t dest, + dma_addr_t src, + size_t len, + unsigned long flags); + +struct dma_async_tx_descriptor *al_dma_prep_memset_lock( + struct dma_chan *c, + dma_addr_t dest, + int value, + size_t len, + unsigned long flags); + +struct dma_async_tx_descriptor *al_dma_prep_xor_lock( + struct dma_chan *c, + dma_addr_t dest, + dma_addr_t *src, + unsigned int src_cnt, + size_t len, + unsigned long flags); + +struct dma_async_tx_descriptor *al_dma_prep_pq_lock( + struct dma_chan *c, + dma_addr_t *dst, + dma_addr_t *src, + unsigned int src_cnt, + const unsigned char *scf, + size_t len, + unsigned long flags); + +struct dma_async_tx_descriptor *al_dma_prep_pq_val_lock( + struct dma_chan *c, + dma_addr_t *pq, + dma_addr_t *src, + unsigned int src_cnt, + const unsigned char *scf, + size_t len, + enum sum_check_flags *pqres, + unsigned long flags); + +struct dma_async_tx_descriptor *al_dma_prep_xor_val_lock( + struct dma_chan *c, + dma_addr_t *src, + unsigned int src_cnt, + size_t len, + enum sum_check_flags *result, + unsigned long flags); + +#endif + diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma_prep_interrupt.c b/target/linux/alpine/files/drivers/dma/al/al_dma_prep_interrupt.c new file mode 100644 index 00000000000000..10833fc692177d --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_dma_prep_interrupt.c @@ -0,0 +1,116 @@ +/* + * Annapurna Labs DMA Linux driver - Interrupt preparation + * Copyright(c) 2011 Annapurna Labs. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
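These prototypes are wired into the generic dmaengine callbacks (device_prep_dma_memcpy and friends) when the channels are registered, so ordinary clients never call them directly. A minimal client-side sketch, assuming only the stock dmaengine API (demo_memcpy and demo_done are illustrative names, not part of this driver):

#include <linux/dmaengine.h>
#include <linux/completion.h>

static void demo_done(void *arg)
{
	complete(arg);	/* runs from the driver's completion path */
}

static int demo_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	DECLARE_COMPLETION_ONSTACK(done);
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* this is what ends up in al_dma_prep_memcpy_lock() */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}
	tx->callback = demo_done;
	tx->callback_param = &done;
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	wait_for_completion(&done);
	dma_release_channel(chan);

	return dma_submit_error(cookie) ? -EIO : 0;
}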
+ * + */ + +#include "al_dma.h" + +/****************************************************************************** + *****************************************************************************/ +struct dma_async_tx_descriptor *al_dma_prep_interrupt_lock( + struct dma_chan *c, + unsigned long flags) +{ + struct al_dma_chan *chan = to_al_dma_chan(c); + struct dma_async_tx_descriptor *txd = NULL; + int idx; + int32_t rc; + struct al_dma_sw_desc *desc; + struct al_raid_transaction *xaction; + + dev_dbg( + chan->device->common.dev, + "%s: chan->idx = %d, flags = %08x\n", + __func__, + chan->idx, + (unsigned int)flags); + + if (likely(al_dma_get_sw_desc_lock(chan, 1) == 0)) + idx = chan->head; + else { + dev_dbg( + chan->device->common.dev, + "%s: al_dma_get_sw_desc_lock failed!\n", + __func__); + + return NULL; + } + + chan->sw_desc_num_locked = 1; + + desc = al_dma_get_ring_ent(chan, idx); + + desc->umap_ent_cnt = 0; + + txd = &desc->txd; + + desc->txd.flags = flags; + + /* prepare hal transaction */ + xaction = &desc->hal_xaction; + memset(xaction, 0, sizeof(struct al_raid_transaction)); + xaction->op = AL_RAID_OP_NOP; + xaction->flags |= AL_SSM_INTERRUPT; + if (flags & DMA_PREP_FENCE) + xaction->flags |= AL_SSM_BARRIER; + + if (flags & (~(DMA_PREP_INTERRUPT | DMA_PREP_FENCE))) + dev_err( + chan->device->common.dev, + "%s: flags = %08x\n", + __func__, + (unsigned int)flags); + + xaction->num_of_srcs = 0; + xaction->total_src_bufs = 0; + + xaction->num_of_dsts = 0; + xaction->total_dst_bufs = 0; + + dev_dbg( + chan->device->common.dev, + "%s: xaction->flags = %08x\n", + __func__, + xaction->flags); + + /* send raid transaction to engine */ + rc = al_raid_dma_prepare(chan->hal_raid, chan->idx, + &desc->hal_xaction); + if (unlikely(rc)) { + dev_err(chan->device->common.dev, + "%s: al_raid_dma_prepare failed!\n", __func__); + spin_unlock_bh(&chan->prep_lock); + return NULL; + } + + chan->tx_desc_produced += desc->hal_xaction.tx_descs_count; + + AL_DMA_STATS_UPDATE( + chan, + chan->stats_prep.int_num, + 1, + chan->stats_prep.int_num, /* dummy */ + 0); + + al_dma_tx_submit_sw_cond_unlock(chan, txd); + + return txd; +} + diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma_prep_memcpy.c b/target/linux/alpine/files/drivers/dma/al/al_dma_prep_memcpy.c new file mode 100644 index 00000000000000..df13719c27ac16 --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_dma_prep_memcpy.c @@ -0,0 +1,165 @@ +/* + * Annapurna Labs DMA Linux driver - Memory copy preparation + * Copyright(c) 2011 Annapurna Labs. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
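The NOP-with-interrupt transaction built in the file above exists so async_tx users can get a completion callback ordered behind earlier fenced operations. A sketch of how such a descriptor is requested through the generic hook (assuming standard dmaengine conventions; demo_irq_desc is an illustrative name):

#include <linux/dmaengine.h>

/* Illustrative only: fetch an interrupt/fence descriptor from a channel;
 * on this driver it would be served by al_dma_prep_interrupt_lock(). */
static struct dma_async_tx_descriptor *
demo_irq_desc(struct dma_chan *chan)
{
	struct dma_device *dd = chan->device;

	if (!dd->device_prep_dma_interrupt)
		return NULL;
	return dd->device_prep_dma_interrupt(chan,
			DMA_PREP_INTERRUPT | DMA_PREP_FENCE);
}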
+ * + */ + +#include "al_dma.h" + +#define MAX_SIZE AL_DMA_MAX_SIZE_MEMCPY + +/****************************************************************************** + *****************************************************************************/ +struct dma_async_tx_descriptor *al_dma_prep_memcpy_lock( + struct dma_chan *c, + dma_addr_t dest, + dma_addr_t src, + size_t len, + unsigned long flags) +{ + struct al_dma_chan *chan = to_al_dma_chan(c); + struct dma_async_tx_descriptor *txd = NULL; + int idx; + int32_t rc; + int sw_desc_num_req = ALIGN(len, MAX_SIZE) / MAX_SIZE; + int sw_desc_num_req_orig = sw_desc_num_req; + size_t len_orig = len; + + dev_dbg( + chan->device->common.dev, + "%s: chan->idx = %d, dest = %X, src = %X, len = %d, flags = %08x\n", + __func__, + chan->idx, + (unsigned int)dest, + (unsigned int)src, + len, + (unsigned int)flags); + + if (likely(al_dma_get_sw_desc_lock(chan, sw_desc_num_req) == 0)) + idx = chan->head; + else { + dev_dbg( + chan->device->common.dev, + "%s: al_dma_get_sw_desc_lock failed!\n", + __func__); + + return NULL; + } + + chan->sw_desc_num_locked = sw_desc_num_req; + + if (unlikely(sw_desc_num_req > 1)) + dev_dbg( + chan->device->common.dev, + "%s: splitting transaction to %d sub-transactions\n\n", + __func__, + sw_desc_num_req); + + while (sw_desc_num_req) { + int cur_len = (len > MAX_SIZE) ? MAX_SIZE : len; + + struct al_dma_sw_desc *desc = al_dma_get_ring_ent(chan, idx); + + struct al_raid_transaction *xaction; + + if (1 == sw_desc_num_req) + txd = &desc->txd; + + if (1 == sw_desc_num_req) { + int umap_ent_cnt = 0; + + dma_descriptor_unmap(txd); + desc->umap_ent_cnt = umap_ent_cnt; + } else + desc->umap_ent_cnt = 0; + + desc->txd.flags = flags; + desc->len = cur_len; + /* prepare hal transaction */ + xaction = &desc->hal_xaction; + memset(xaction, 0, sizeof(struct al_raid_transaction)); + xaction->op = AL_RAID_OP_MEM_CPY; + if ((flags & DMA_PREP_INTERRUPT) && (1 == sw_desc_num_req)) + xaction->flags |= AL_SSM_INTERRUPT; + if ((flags & DMA_PREP_FENCE) && (1 == sw_desc_num_req)) + xaction->flags |= AL_SSM_BARRIER; + + /* use bufs[0] and block[0] for source buffers/blocks */ + desc->bufs[0].addr = src; + desc->bufs[0].len = cur_len; + desc->blocks[0].bufs = &desc->bufs[0]; + desc->blocks[0].num = 1; + xaction->srcs_blocks = &desc->blocks[0]; + xaction->num_of_srcs = 1; + xaction->total_src_bufs = 1; + + /* use bufs[1] and block[1] for destination buffers/blocks */ + desc->bufs[1].addr = dest; + desc->bufs[1].len = cur_len; + desc->blocks[1].bufs = &desc->bufs[1]; + desc->blocks[1].num = 1; + xaction->dsts_blocks = &desc->blocks[1]; + xaction->num_of_dsts = 1; + xaction->total_dst_bufs = 1; + + dev_dbg( + chan->device->common.dev, + "%s: xaction->flags = %08x\n", + __func__, + xaction->flags); + + /* send raid transaction to engine */ + rc = al_raid_dma_prepare(chan->hal_raid, chan->idx, + &desc->hal_xaction); + if (unlikely(rc)) { + dev_err(chan->device->common.dev, + "%s: al_raid_dma_prepare failed!\n", __func__); + spin_unlock_bh(&chan->prep_lock); + return NULL; + } + + chan->tx_desc_produced += desc->hal_xaction.tx_descs_count; + + desc->last_is_memcpy = 1; + +#ifdef AL_DMA_MEMCPY_VALIDATION + desc->memcpy_dest = phys_to_virt(dest); + desc->memcpy_src = phys_to_virt(src); + desc->memcpy_len = cur_len; +#endif + + idx++; + sw_desc_num_req--; + len -= MAX_SIZE; + dest += MAX_SIZE; + src += MAX_SIZE; + } + + AL_DMA_STATS_UPDATE( + chan, + chan->stats_prep.memcpy_num, + sw_desc_num_req_orig, + chan->stats_prep.memcpy_size, + len_orig); + + 
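	/*
	 * Note: sw_desc_num_req was computed as ALIGN(len, MAX_SIZE) /
	 * MAX_SIZE, i.e. DIV_ROUND_UP(len, MAX_SIZE), so an oversized copy
	 * is split into MAX_SIZE-sized chunks; only the final chunk carries
	 * the caller's interrupt/fence flags and is handed back as txd. The
	 * helper below is expected to publish the chain and release the
	 * prep lock taken by al_dma_get_sw_desc_lock().
	 */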
al_dma_tx_submit_sw_cond_unlock(chan, txd); + + return txd; +} + diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma_prep_memset.c b/target/linux/alpine/files/drivers/dma/al/al_dma_prep_memset.c new file mode 100644 index 00000000000000..d544e2544e510f --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_dma_prep_memset.c @@ -0,0 +1,155 @@ +/* + * Annapurna Labs DMA Linux driver - Memory setting preparation + * Copyright(c) 2011 Annapurna Labs. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include "al_dma.h" + +#define MAX_SIZE AL_DMA_MAX_SIZE_MEMSET + +/****************************************************************************** + *****************************************************************************/ +struct dma_async_tx_descriptor *al_dma_prep_memset_lock( + struct dma_chan *c, + dma_addr_t dest, + int value, + size_t len, + unsigned long flags) +{ + struct al_dma_chan *chan = to_al_dma_chan(c); + struct dma_async_tx_descriptor *txd = NULL; + int idx; + int32_t rc; + int sw_desc_num_req = ALIGN(len, MAX_SIZE) / MAX_SIZE; + int sw_desc_num_req_orig = sw_desc_num_req; + size_t len_orig = len; + + dev_dbg( + chan->device->common.dev, + "%s: chan->idx = %d, dest = %X, value = %d, len = %d, flags = %08x\n", + __func__, + chan->idx, + (unsigned int)dest, + value, + len, + (unsigned int)flags); + + if (likely(al_dma_get_sw_desc_lock(chan, sw_desc_num_req) == 0)) + idx = chan->head; + else { + dev_dbg( + chan->device->common.dev, + "%s: al_dma_get_sw_desc_lock failed!\n", + __func__); + + return NULL; + } + + chan->sw_desc_num_locked = sw_desc_num_req; + + if (unlikely(sw_desc_num_req > 1)) + dev_dbg( + chan->device->common.dev, + "%s: splitting transaction to %d sub-transactions\n\n", + __func__, + sw_desc_num_req); + + while (sw_desc_num_req) { + int cur_len = (len > MAX_SIZE) ? 
MAX_SIZE : len; + + struct al_dma_sw_desc *desc = al_dma_get_ring_ent(chan, idx); + + struct al_raid_transaction *xaction; + + if (1 == sw_desc_num_req) + txd = &desc->txd; + + if (1 == sw_desc_num_req) { + int umap_ent_cnt = 0; + + dma_descriptor_unmap(txd); + + desc->umap_ent_cnt = umap_ent_cnt; + } else + desc->umap_ent_cnt = 0; + + desc->txd.flags = flags; + desc->len = cur_len; + /* prepare hal transaction */ + xaction = &desc->hal_xaction; + memset(xaction, 0, sizeof(struct al_raid_transaction)); + xaction->op = AL_RAID_OP_MEM_SET; + if ((flags & DMA_PREP_INTERRUPT) && (1 == sw_desc_num_req)) + xaction->flags |= AL_SSM_INTERRUPT; + if ((flags & DMA_PREP_FENCE) && (1 == sw_desc_num_req)) + xaction->flags |= AL_SSM_BARRIER; + + memset(xaction->data, value, sizeof(xaction->data)); + + /* MEMSET has no sources */ + xaction->num_of_srcs = 0; + xaction->total_src_bufs = 0; + + /* use bufs[1] and block[1] for destination buffers/blocks */ + desc->bufs[1].addr = dest; + desc->bufs[1].len = cur_len; + desc->blocks[1].bufs = &desc->bufs[1]; + desc->blocks[1].num = 1; + xaction->dsts_blocks = &desc->blocks[1]; + xaction->num_of_dsts = 1; + xaction->total_dst_bufs = 1; + + dev_dbg( + chan->device->common.dev, + "%s: xaction->flags = %08x\n", + __func__, + xaction->flags); + + /* send raid transaction to engine */ + rc = al_raid_dma_prepare(chan->hal_raid, chan->idx, + &desc->hal_xaction); + if (unlikely(rc)) { + dev_err( + chan->device->common.dev, + "%s: al_raid_dma_prepare failed!\n", __func__); + spin_unlock_bh(&chan->prep_lock); + return NULL; + } + + chan->tx_desc_produced += desc->hal_xaction.tx_descs_count; + + idx++; + sw_desc_num_req--; + len -= MAX_SIZE; + dest += MAX_SIZE; + } + + AL_DMA_STATS_UPDATE( + chan, + chan->stats_prep.memset_num, + sw_desc_num_req_orig, + chan->stats_prep.memset_size, + len_orig); + + al_dma_tx_submit_sw_cond_unlock(chan, txd); + + return txd; +} + diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma_prep_pq.c b/target/linux/alpine/files/drivers/dma/al/al_dma_prep_pq.c new file mode 100644 index 00000000000000..4c27d13c604c5c --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_dma_prep_pq.c @@ -0,0 +1,213 @@ +/* + * Annapurna Labs DMA Linux driver - PQ preparation + * Copyright(c) 2011 Annapurna Labs. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
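For reference, the parity produced by the P_CALC/Q_CALC opcodes follows the usual RAID-6 definitions: P is the byte-wise XOR of the source blocks, and Q is a GF(2^8) weighted sum using the caller's scf coefficients (the gflog/gfilog tables later in al_hal_ssm_raid.c implement the field arithmetic). A small software reference for P, useful when sanity-checking engine output (a sketch, not driver code):

#include <linux/types.h>

/* CPU reference: P[i] = D0[i] ^ D1[i] ^ ... ^ Dn-1[i] */
static void p_calc_ref(u8 *p, u8 *const *src, unsigned int src_cnt,
		       size_t len)
{
	size_t i;
	unsigned int s;

	for (i = 0; i < len; i++) {
		u8 acc = 0;

		for (s = 0; s < src_cnt; s++)
			acc ^= src[s][i];
		p[i] = acc;
	}
}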
+ * + */ + +#include "al_dma.h" + +#define MAX_SIZE AL_DMA_MAX_SIZE_PQ + +/****************************************************************************** + *****************************************************************************/ +struct dma_async_tx_descriptor *al_dma_prep_pq_lock( + struct dma_chan *c, + dma_addr_t *dst, + dma_addr_t *src, + unsigned int src_cnt, + const unsigned char *scf, + size_t len, + unsigned long flags) +{ + struct al_dma_chan *chan = to_al_dma_chan(c); + struct dma_async_tx_descriptor *txd = NULL; + int idx; + int32_t rc; + int sw_desc_num_req = ALIGN(len, MAX_SIZE) / MAX_SIZE; + int sw_desc_num_req_orig = sw_desc_num_req; + size_t len_orig = len; + dma_addr_t src_off = 0; + dma_addr_t dst_off = 0; + int i; + + dev_dbg( + chan->device->common.dev, + "%s: dest_p = %X, dest_q = %X, src = %X, cnt = %d, len = %d," + " flags = %08x\n", + __func__, + (unsigned int)dst[0], + (unsigned int)dst[1], + (unsigned int)src, + src_cnt, + len, + (unsigned int)flags); + + if (unlikely(src_cnt > AL_DMA_MAX_XOR)) { + BUG(); + return NULL; + } + + if (unlikely( + (flags & DMA_PREP_PQ_DISABLE_P) && + (flags & DMA_PREP_PQ_DISABLE_Q))) { + BUG(); + return NULL; + } + + if (likely(al_dma_get_sw_desc_lock(chan, sw_desc_num_req) == 0)) + idx = chan->head; + else { + dev_dbg( + chan->device->common.dev, + "%s: al_dma_get_sw_desc_lock failed!\n", + __func__); + + return NULL; + } + + chan->sw_desc_num_locked = sw_desc_num_req; + + if (unlikely(sw_desc_num_req > 1)) + dev_dbg( + chan->device->common.dev, + "%s: splitting transaction to %d sub-transactions\n\n", + __func__, + sw_desc_num_req); + + while (sw_desc_num_req) { + int cur_len = (len > MAX_SIZE) ? MAX_SIZE : len; + + struct al_dma_sw_desc *desc = al_dma_get_ring_ent(chan, idx); + + struct al_raid_transaction *xaction; + + if (1 == sw_desc_num_req) + txd = &desc->txd; + + if (1 == sw_desc_num_req) { + int umap_ent_cnt = 0; + dma_descriptor_unmap(txd); + + desc->umap_ent_cnt = umap_ent_cnt; + } else + desc->umap_ent_cnt = 0; + + desc->txd.flags = flags; + desc->len = cur_len; + /* prepare hal transaction */ + xaction = &desc->hal_xaction; + memset(xaction, 0, sizeof(struct al_raid_transaction)); + xaction->op = AL_RAID_OP_PQ_CALC; + if (flags & DMA_PREP_PQ_DISABLE_P) + xaction->op = AL_RAID_OP_Q_CALC; + if (flags & DMA_PREP_PQ_DISABLE_Q) + xaction->op = AL_RAID_OP_P_CALC; + if ((flags & DMA_PREP_INTERRUPT) && (1 == sw_desc_num_req)) + xaction->flags |= AL_SSM_INTERRUPT; + if ((flags & DMA_PREP_FENCE) && (1 == sw_desc_num_req)) + xaction->flags |= AL_SSM_BARRIER; + + /* use bufs[0] and block[i] for source buffers/blocks */ + for (i = 0; i < src_cnt; i++) { + desc->bufs[i].addr = src[i] + src_off; + desc->bufs[i].len = cur_len; + desc->blocks[i].bufs = &desc->bufs[i]; + desc->blocks[i].num = 1; + } + + xaction->srcs_blocks = &desc->blocks[0]; + xaction->num_of_srcs = src_cnt; + xaction->total_src_bufs = src_cnt; + + /* use bufs[1] and block[1] for destination buffers/blocks */ + if (flags & DMA_PREP_PQ_DISABLE_P) { + desc->bufs[src_cnt].addr = dst[1] + dst_off; + desc->bufs[src_cnt].len = cur_len; + desc->blocks[src_cnt].bufs = &desc->bufs[src_cnt]; + desc->blocks[src_cnt].num = 1; + + xaction->num_of_dsts = 1; + xaction->total_dst_bufs = 1; + } else if (flags & DMA_PREP_PQ_DISABLE_Q) { + desc->bufs[src_cnt].addr = dst[0] + dst_off; + desc->bufs[src_cnt].len = cur_len; + desc->blocks[src_cnt].bufs = &desc->bufs[src_cnt]; + desc->blocks[src_cnt].num = 1; + + xaction->num_of_dsts = 1; + xaction->total_dst_bufs = 1; + } else { + 
desc->bufs[src_cnt].addr = dst[0] + dst_off; + desc->bufs[src_cnt].len = cur_len; + desc->blocks[src_cnt].bufs = &desc->bufs[src_cnt]; + desc->blocks[src_cnt].num = 1; + + desc->bufs[src_cnt + 1].addr = dst[1] + dst_off; + desc->bufs[src_cnt + 1].len = cur_len; + desc->blocks[src_cnt + 1].bufs = + &desc->bufs[src_cnt + 1]; + desc->blocks[src_cnt + 1].num = 1; + + xaction->num_of_dsts = 2; + xaction->total_dst_bufs = 2; + } + + xaction->dsts_blocks = &desc->blocks[src_cnt]; + + xaction->coefs = (uint8_t *)scf; + + dev_dbg( + chan->device->common.dev, + "%s: xaction->flags = %08x\n", + __func__, + xaction->flags); + + /* send raid transaction to engine */ + rc = al_raid_dma_prepare(chan->hal_raid, chan->idx, + &desc->hal_xaction); + if (unlikely(rc)) { + dev_err( + chan->device->common.dev, + "%s: al_raid_dma_prepare failed!\n", __func__); + spin_unlock_bh(&chan->prep_lock); + return NULL; + } + + chan->tx_desc_produced += desc->hal_xaction.tx_descs_count; + BUG_ON(desc->hal_xaction.tx_descs_count > AL_DMA_MAX_XOR); + + idx++; + sw_desc_num_req--; + len -= MAX_SIZE; + src_off += MAX_SIZE; + dst_off += MAX_SIZE; + } + + AL_DMA_STATS_UPDATE( + chan, + chan->stats_prep.pq_num, + sw_desc_num_req_orig, + chan->stats_prep.pq_size, + len_orig); + + al_dma_tx_submit_sw_cond_unlock(chan, txd); + + return txd; +} + diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma_prep_pq_val.c b/target/linux/alpine/files/drivers/dma/al/al_dma_prep_pq_val.c new file mode 100644 index 00000000000000..b898530b28e12a --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_dma_prep_pq_val.c @@ -0,0 +1,210 @@ +/* + * Annapurna Labs DMA Linux driver - PQ validation preparation + * Copyright(c) 2011 Annapurna Labs. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
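The validation variant that follows does not write parity out; the engine recomputes the syndrome with the stored P/Q folded in (note how q_coefs gains a coefficient of 1 for the stored Q and 0 for the stored P), and the driver latches the verdict into *pqres on completion. A client-side sketch of interpreting that result, assuming the stock async_tx sum-check flags (demo_stripe_ok is an illustrative name):

#include <linux/async_tx.h>

/* Illustrative: a stripe is intact iff neither sum-check bit was set */
static bool demo_stripe_ok(enum sum_check_flags pqres)
{
	return !(pqres & (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT));
}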
+ * + */ + +#include "al_dma.h" + +#define MAX_SIZE AL_DMA_MAX_SIZE_PQ_VAL + +/****************************************************************************** + *****************************************************************************/ +struct dma_async_tx_descriptor *al_dma_prep_pq_val_lock( + struct dma_chan *c, + dma_addr_t *pq, + dma_addr_t *src, + unsigned int src_cnt, + const unsigned char *scf, + size_t len, + enum sum_check_flags *pqres, + unsigned long flags) +{ + struct al_dma_chan *chan = to_al_dma_chan(c); + struct dma_async_tx_descriptor *txd = NULL; + int idx; + int32_t rc; + int sw_desc_num_req = ALIGN(len, MAX_SIZE) / MAX_SIZE; + int sw_desc_num_req_orig = sw_desc_num_req; + size_t len_orig = len; + dma_addr_t src_off = 0; + int i; + + unsigned char q_coefs[AL_DMA_OP_MAX_BLOCKS]; + + dev_dbg( + chan->device->common.dev, + "%s: p=%X, q=%X, src=%X, cnt=%d, len=%d, flags=%08x\n", + __func__, + (unsigned int)pq[0], + (unsigned int)pq[1], + (unsigned int)src, + src_cnt, + len, + (unsigned int)flags); + + if (unlikely((src_cnt + 2) > AL_DMA_MAX_XOR)) { + BUG(); + return NULL; + } + + if (unlikely( + (flags & DMA_PREP_PQ_DISABLE_P) && + (flags & DMA_PREP_PQ_DISABLE_Q))) { + BUG(); + return NULL; + } + + if (likely(al_dma_get_sw_desc_lock(chan, sw_desc_num_req) == 0)) + idx = chan->head; + else { + dev_dbg( + chan->device->common.dev, + "%s: al_dma_get_sw_desc_lock failed!\n", + __func__); + + return NULL; + } + + chan->sw_desc_num_locked = sw_desc_num_req; + + if (unlikely(sw_desc_num_req > 1)) + dev_dbg( + chan->device->common.dev, + "%s: splitting transaction to %d sub-transactions\n\n", + __func__, + sw_desc_num_req); + + memcpy(q_coefs, scf, src_cnt); + + while (sw_desc_num_req) { + int cur_len = (len > MAX_SIZE) ? MAX_SIZE : len; + + struct al_dma_sw_desc *desc = al_dma_get_ring_ent(chan, idx); + + struct al_raid_transaction *xaction; + + if (1 == sw_desc_num_req) + txd = &desc->txd; + + if (1 == sw_desc_num_req) { + int umap_ent_cnt = 0; + dma_descriptor_unmap(txd); + desc->umap_ent_cnt = umap_ent_cnt; + } else + desc->umap_ent_cnt = 0; + + desc->txd.flags = flags; + desc->len = cur_len; + /* prepare hal transaction */ + xaction = &desc->hal_xaction; + memset(xaction, 0, sizeof(struct al_raid_transaction)); + xaction->op = AL_RAID_OP_PQ_VAL; + if (flags & DMA_PREP_PQ_DISABLE_P) + xaction->op = AL_RAID_OP_Q_VAL; + if (flags & DMA_PREP_PQ_DISABLE_Q) + xaction->op = AL_RAID_OP_P_VAL; + if ((flags & DMA_PREP_INTERRUPT) && (1 == sw_desc_num_req)) + xaction->flags |= AL_SSM_INTERRUPT; + if ((flags & DMA_PREP_FENCE) && (1 == sw_desc_num_req)) + xaction->flags |= AL_SSM_BARRIER; + + /* use bufs[0] and block[i] for source buffers/blocks */ + for (i = 0; i < src_cnt; i++) { + desc->bufs[i].addr = src[i] + src_off; + desc->bufs[i].len = cur_len; + desc->blocks[i].bufs = &desc->bufs[i]; + desc->blocks[i].num = 1; + } + + if (!(flags & DMA_PREP_PQ_DISABLE_Q)) { + desc->bufs[i].addr = pq[1] + src_off; + desc->bufs[i].len = cur_len; + desc->blocks[i].bufs = &desc->bufs[i]; + desc->blocks[i].num = 1; + + q_coefs[i] = 1; /* Count Q */ + + xaction->q_index = i; + + i++; + } + + if (!(flags & DMA_PREP_PQ_DISABLE_P)) { + desc->bufs[i].addr = pq[0] + src_off; + desc->bufs[i].len = cur_len; + desc->blocks[i].bufs = &desc->bufs[i]; + desc->blocks[i].num = 1; + + q_coefs[i] = 0; /* Ignore P */ + + i++; + } + + xaction->srcs_blocks = &desc->blocks[0]; + xaction->num_of_srcs = i; + xaction->total_src_bufs = i; + + xaction->num_of_dsts = 0; + xaction->total_dst_bufs = 0; + + xaction->coefs = 
(uint8_t *)q_coefs; + + dev_dbg( + chan->device->common.dev, + "%s: xaction->flags = %08x\n", + __func__, + xaction->flags); + + /* send raid transaction to engine */ + rc = al_raid_dma_prepare(chan->hal_raid, chan->idx, + &desc->hal_xaction); + if (unlikely(rc)) { + dev_err( + chan->device->common.dev, + "%s: al_raid_dma_prepare failed!\n", __func__); + spin_unlock_bh(&chan->prep_lock); + return NULL; + } + + chan->tx_desc_produced += desc->hal_xaction.tx_descs_count; + BUG_ON(desc->hal_xaction.tx_descs_count > AL_DMA_MAX_XOR); + + desc->last_is_pq_val = 1; + desc->pq_val_res = pqres; + *desc->pq_val_res = 0; + + idx++; + sw_desc_num_req--; + len -= MAX_SIZE; + src_off += MAX_SIZE; + } + + AL_DMA_STATS_UPDATE( + chan, + chan->stats_prep.pq_val_num, + sw_desc_num_req_orig, + chan->stats_prep.pq_val_size, + len_orig); + + al_dma_tx_submit_sw_cond_unlock(chan, txd); + + return txd; +} + diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma_prep_xor.c b/target/linux/alpine/files/drivers/dma/al/al_dma_prep_xor.c new file mode 100644 index 00000000000000..d07c3eeeaac6f5 --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_dma_prep_xor.c @@ -0,0 +1,183 @@ +/* + * Annapurna Labs DMA Linux driver - XOR preparation + * Copyright(c) 2011 Annapurna Labs. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
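Since plain XOR is just the P computation, the routine that follows reuses AL_RAID_OP_P_CALC. Callers such as md/raid5 reach it through the async_tx layer once the channel advertises DMA_XOR, roughly like this (a sketch using the stock async_tx API; demo_xor is an illustrative name):

#include <linux/async_tx.h>

static struct dma_async_tx_descriptor *
demo_xor(struct page *dest, struct page **srcs, int src_cnt, size_t len)
{
	struct async_submit_ctl submit;

	/* no dependency, no callback, no pre-allocated scribble space */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);
	return async_xor(dest, srcs, 0, src_cnt, len, &submit);
}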
+ * + */ + +#include "al_dma.h" + +#define MAX_SIZE AL_DMA_MAX_SIZE_XOR + +/****************************************************************************** + *****************************************************************************/ +struct dma_async_tx_descriptor *al_dma_prep_xor_lock( + struct dma_chan *c, + dma_addr_t dest, + dma_addr_t *src, + unsigned int src_cnt, + size_t len, + unsigned long flags) +{ + struct al_dma_chan *chan = to_al_dma_chan(c); + struct dma_async_tx_descriptor *txd = NULL; + int idx; + int32_t rc; + int sw_desc_num_req = ALIGN(len, MAX_SIZE) / MAX_SIZE; + int sw_desc_num_req_orig = sw_desc_num_req; + size_t len_orig = len; + dma_addr_t src_off = 0; + int i; + + dev_dbg( + chan->device->common.dev, + "%s: chan->idx = %d, dest = %X, src = %X, cnt = %d, len = %d," + " flags = %08x\n", + __func__, + chan->idx, + (unsigned int)dest, + (unsigned int)src, + src_cnt, + len, + (unsigned int)flags); + + if (unlikely(src_cnt > AL_DMA_MAX_XOR)) { + BUG(); + return NULL; + } + + if (likely(al_dma_get_sw_desc_lock(chan, sw_desc_num_req) == 0)) + idx = chan->head; + else { + dev_dbg( + chan->device->common.dev, + "%s: al_dma_get_sw_desc_lock failed!\n", + __func__); + + return NULL; + } + + chan->sw_desc_num_locked = sw_desc_num_req; + + if (unlikely(sw_desc_num_req > 1)) + dev_dbg( + chan->device->common.dev, + "%s: splitting transaction to %d sub-transactions\n\n", + __func__, + sw_desc_num_req); + + while (sw_desc_num_req) { + int cur_len = (len > MAX_SIZE) ? MAX_SIZE : len; + + struct al_dma_sw_desc *desc = al_dma_get_ring_ent(chan, idx); + + struct al_raid_transaction *xaction; + + if (1 == sw_desc_num_req) + txd = &desc->txd; + + if (1 == sw_desc_num_req) { + int umap_ent_cnt = 0; + dma_descriptor_unmap(txd); + desc->umap_ent_cnt = umap_ent_cnt; + } else + desc->umap_ent_cnt = 0; + + desc->txd.flags = flags; + desc->len = cur_len; + /* prepare hal transaction */ + xaction = &desc->hal_xaction; + memset(xaction, 0, sizeof(struct al_raid_transaction)); + xaction->op = AL_RAID_OP_P_CALC; + if ((flags & DMA_PREP_INTERRUPT) && (1 == sw_desc_num_req)) + xaction->flags |= AL_SSM_INTERRUPT; + if ((flags & DMA_PREP_FENCE) && (1 == sw_desc_num_req)) + xaction->flags |= AL_SSM_BARRIER; + + /* use bufs[0] and block[i] for source buffers/blocks */ + for (i = 0; i < src_cnt; i++) { + desc->bufs[i].addr = src[i] + src_off; + desc->bufs[i].len = cur_len; + desc->blocks[i].bufs = &desc->bufs[i]; + desc->blocks[i].num = 1; + } + + xaction->srcs_blocks = &desc->blocks[0]; + xaction->num_of_srcs = src_cnt; + xaction->total_src_bufs = src_cnt; + + /* use bufs[1] and block[1] for destination buffers/blocks */ + desc->bufs[src_cnt].addr = dest; + desc->bufs[src_cnt].len = cur_len; + desc->blocks[src_cnt].bufs = &desc->bufs[src_cnt]; + desc->blocks[src_cnt].num = 1; + + xaction->dsts_blocks = &desc->blocks[src_cnt]; + xaction->num_of_dsts = 1; + xaction->total_dst_bufs = 1; + + dev_dbg( + chan->device->common.dev, + "%s: xaction->flags = %08x\n", + __func__, + xaction->flags); + + /* send raid transaction to engine */ + rc = al_raid_dma_prepare(chan->hal_raid, chan->idx, + &desc->hal_xaction); + if (unlikely(rc)) { + dev_err( + chan->device->common.dev, + "%s: al_raid_dma_prepare failed!\n", __func__); + spin_unlock_bh(&chan->prep_lock); + return NULL; + } + + chan->tx_desc_produced += desc->hal_xaction.tx_descs_count; + BUG_ON(desc->hal_xaction.tx_descs_count > AL_DMA_MAX_XOR); + + desc->last_is_xor = 1; + +#ifdef AL_DMA_XOR_VALIDATION + desc->xor_dest = phys_to_virt(dest); + desc->xor_len 
= cur_len; + desc->xor_src_cnt = src_cnt; + + for (i = 0; i < src_cnt; i++) + desc->xor_src[i] = phys_to_virt(src[i] + src_off); +#endif + + idx++; + sw_desc_num_req--; + len -= MAX_SIZE; + dest += MAX_SIZE; + src_off += MAX_SIZE; + } + + AL_DMA_STATS_UPDATE( + chan, + chan->stats_prep.xor_num, + sw_desc_num_req_orig, + chan->stats_prep.xor_size, + len_orig); + + al_dma_tx_submit_sw_cond_unlock(chan, txd); + + return txd; +} + diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma_prep_xor_val.c b/target/linux/alpine/files/drivers/dma/al/al_dma_prep_xor_val.c new file mode 100644 index 00000000000000..ef2d64cdd1b335 --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_dma_prep_xor_val.c @@ -0,0 +1,172 @@ +/* + * Annapurna Labs DMA Linux driver - XOR validation preparation + * Copyright(c) 2011 Annapurna Labs. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include "al_dma.h" + +#define MAX_SIZE AL_DMA_MAX_SIZE_XOR_VAL + +/****************************************************************************** + *****************************************************************************/ +struct dma_async_tx_descriptor *al_dma_prep_xor_val_lock( + struct dma_chan *c, + dma_addr_t *src, + unsigned int src_cnt, + size_t len, + enum sum_check_flags *result, + unsigned long flags) +{ + struct al_dma_chan *chan = to_al_dma_chan(c); + struct dma_async_tx_descriptor *txd = NULL; + int idx; + int32_t rc; + int sw_desc_num_req = ALIGN(len, MAX_SIZE) / MAX_SIZE; + int sw_desc_num_req_orig = sw_desc_num_req; + size_t len_orig = len; + dma_addr_t src_off = 0; + int i; + + dev_dbg( + chan->device->common.dev, + "%s: src = %X, cnt = %d, len = %d, flags = %08x\n", + __func__, + (unsigned int)src, + src_cnt, + len, + (unsigned int)flags); + + if (unlikely(src_cnt > AL_DMA_MAX_XOR)) { + BUG(); + return NULL; + } + + if (likely(al_dma_get_sw_desc_lock(chan, sw_desc_num_req) == 0)) + idx = chan->head; + else { + dev_dbg( + chan->device->common.dev, + "%s: al_dma_get_sw_desc_lock failed!\n", + __func__); + + return NULL; + } + + chan->sw_desc_num_locked = sw_desc_num_req; + + if (unlikely(sw_desc_num_req > 1)) + dev_dbg( + chan->device->common.dev, + "%s: splitting transaction to %d sub-transactions\n\n", + __func__, + sw_desc_num_req); + + while (sw_desc_num_req) { + int cur_len = (len > MAX_SIZE) ? 
MAX_SIZE : len; + + struct al_dma_sw_desc *desc = al_dma_get_ring_ent(chan, idx); + + struct al_raid_transaction *xaction; + + if (1 == sw_desc_num_req) + txd = &desc->txd; + + if (1 == sw_desc_num_req) { + int umap_ent_cnt = 0; + dma_descriptor_unmap(txd); + desc->umap_ent_cnt = umap_ent_cnt; + } else + desc->umap_ent_cnt = 0; + + desc->txd.flags = flags; + desc->len = cur_len; + /* prepare hal transaction */ + xaction = &desc->hal_xaction; + memset(xaction, 0, sizeof(struct al_raid_transaction)); + xaction->op = AL_RAID_OP_P_VAL; + if ((flags & DMA_PREP_INTERRUPT) && (1 == sw_desc_num_req)) + xaction->flags |= AL_SSM_INTERRUPT; + if ((flags & DMA_PREP_FENCE) && (1 == sw_desc_num_req)) + xaction->flags |= AL_SSM_BARRIER; + + if (flags & (~(DMA_PREP_INTERRUPT | DMA_PREP_FENCE))) + dev_err( + chan->device->common.dev, + "%s: flags = %08x\n", + __func__, + (unsigned int)flags); + + /* use bufs[0] and block[i] for source buffers/blocks */ + for (i = 0; i < src_cnt; i++) { + desc->bufs[i].addr = src[i] + src_off; + desc->bufs[i].len = cur_len; + desc->blocks[i].bufs = &desc->bufs[i]; + desc->blocks[i].num = 1; + } + + xaction->srcs_blocks = &desc->blocks[0]; + xaction->num_of_srcs = i; + xaction->total_src_bufs = i; + + xaction->num_of_dsts = 0; + xaction->total_dst_bufs = 0; + + dev_dbg( + chan->device->common.dev, + "%s: xaction->flags = %08x\n", + __func__, + xaction->flags); + + /* send raid transaction to engine */ + rc = al_raid_dma_prepare(chan->hal_raid, chan->idx, + &desc->hal_xaction); + if (unlikely(rc)) { + dev_err( + chan->device->common.dev, + "%s: al_raid_dma_prepare failed!\n", __func__); + spin_unlock_bh(&chan->prep_lock); + return NULL; + } + + chan->tx_desc_produced += desc->hal_xaction.tx_descs_count; + BUG_ON(desc->hal_xaction.tx_descs_count > AL_DMA_MAX_XOR); + + desc->last_is_xor_val = 1; + desc->xor_val_res = result; + *desc->xor_val_res = 0; + + idx++; + sw_desc_num_req--; + len -= MAX_SIZE; + src_off += MAX_SIZE; + } + + AL_DMA_STATS_UPDATE( + chan, + chan->stats_prep.xor_val_num, + sw_desc_num_req_orig, + chan->stats_prep.xor_val_size, + len_orig); + + al_dma_tx_submit_sw_cond_unlock(chan, txd); + + return txd; +} + diff --git a/target/linux/alpine/files/drivers/dma/al/al_dma_sysfs.c b/target/linux/alpine/files/drivers/dma/al/al_dma_sysfs.c new file mode 100644 index 00000000000000..47ee48cae337d3 --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_dma_sysfs.c @@ -0,0 +1,443 @@ +/* + * Annapurna Labs DMA Linux driver - sysfs support + * Copyright(c) 2011 Annapurna Labs. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
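As with all the prep routines above, a full descriptor ring makes the function return NULL, and dmaengine clients are expected to fall back to a synchronous CPU path. A sketch of that pattern with the kernel's generic xor_blocks() helper (demo_xor_or_cpu is an illustrative name; dest_va/src_va are the kernel-virtual views of the same buffers):

#include <linux/dmaengine.h>
#include <linux/raid/xor.h>

static void demo_xor_or_cpu(struct dma_chan *chan,
			    dma_addr_t dest, dma_addr_t *src, int src_cnt,
			    void *dest_va, void **src_va, size_t len)
{
	struct dma_async_tx_descriptor *tx =
		chan->device->device_prep_dma_xor(chan, dest, src, src_cnt,
						  len, DMA_PREP_INTERRUPT);

	if (tx) {
		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
	} else {
		/* no descriptor available: compute on the CPU instead */
		xor_blocks(src_cnt, len, dest_va, src_va);
	}
}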
+ *
+ */
+
+#include <linux/device.h>
+
+#include <linux/sysfs.h>
+#include "al_dma.h"
+
+#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
+
+#define DEVICE_STATS_PREP_ATTR(_name) {\
+	__ATTR(stats_prep_##_name, 0440, rd_stats_prep, NULL),\
+	(void *)offsetof(struct al_dma_chan_stats_prep, _name) }
+
+#define DEVICE_STATS_COMP_ATTR(_name) {\
+	__ATTR(stats_comp_##_name, 0440, rd_stats_comp, NULL),\
+	(void *)offsetof(struct al_dma_chan_stats_comp, _name) }
+
+#define UDMA_DUMP_PREP_ATTR(_name, _type) {\
+	__ATTR(udma_dump_##_name, 0660, rd_udma_dump, wr_udma_dump),\
+	(void *)_type }
+
+enum udma_dump_type {
+	UDMA_DUMP_M2S_REGS,
+	UDMA_DUMP_M2S_Q_STRUCT,
+	UDMA_DUMP_M2S_Q_POINTERS,
+	UDMA_DUMP_S2M_REGS,
+	UDMA_DUMP_S2M_Q_STRUCT,
+	UDMA_DUMP_S2M_Q_POINTERS
+};
+
+#ifdef CONFIG_AL_DMA_STATS
+static ssize_t rd_stats_prep(
+	struct device *dev,
+	struct device_attribute *attr,
+	char *buf);
+
+static ssize_t rd_stats_comp(
+	struct device *dev,
+	struct device_attribute *attr,
+	char *buf);
+
+static ssize_t rd_stats_rst(
+	struct device *dev,
+	struct device_attribute *attr,
+	char *buf);
+
+static ssize_t wr_stats_rst(
+	struct device *dev,
+	struct device_attribute *attr,
+	const char *buf,
+	size_t count);
+
+static ssize_t rd_udma_dump(
+	struct device *dev,
+	struct device_attribute *attr,
+	char *buf);
+
+static ssize_t wr_udma_dump(
+	struct device *dev,
+	struct device_attribute *attr,
+	const char *buf,
+	size_t count);
+#endif
+
+#ifdef CONFIG_AL_DMA_STATS
+static struct dev_ext_attribute dev_attr_stats_prep[] = {
+	DEVICE_STATS_PREP_ATTR(int_num),
+	DEVICE_STATS_PREP_ATTR(memcpy_num),
+	DEVICE_STATS_PREP_ATTR(memcpy_size),
+	DEVICE_STATS_PREP_ATTR(sg_memcpy_num),
+	DEVICE_STATS_PREP_ATTR(sg_memcpy_size),
+	DEVICE_STATS_PREP_ATTR(memset_num),
+	DEVICE_STATS_PREP_ATTR(memset_size),
+	DEVICE_STATS_PREP_ATTR(xor_num),
+	DEVICE_STATS_PREP_ATTR(xor_size),
+	DEVICE_STATS_PREP_ATTR(pq_num),
+	DEVICE_STATS_PREP_ATTR(pq_size),
+	DEVICE_STATS_PREP_ATTR(pq_val_num),
+	DEVICE_STATS_PREP_ATTR(pq_val_size),
+	DEVICE_STATS_PREP_ATTR(xor_val_num),
+	DEVICE_STATS_PREP_ATTR(xor_val_size),
+	DEVICE_STATS_PREP_ATTR(matching_cpu),
+	DEVICE_STATS_PREP_ATTR(mismatching_cpu),
+};
+
+static struct dev_ext_attribute dev_attr_stats_comp[] = {
+	DEVICE_STATS_COMP_ATTR(redundant_int_cnt),
+	DEVICE_STATS_COMP_ATTR(matching_cpu),
+	DEVICE_STATS_COMP_ATTR(mismatching_cpu),
+};
+
+/* Device attrs - udma debug */
+static struct dev_ext_attribute dev_attr_udma_debug[] = {
+	UDMA_DUMP_PREP_ATTR(m2s_regs, UDMA_DUMP_M2S_REGS),
+	UDMA_DUMP_PREP_ATTR(m2s_q_struct, UDMA_DUMP_M2S_Q_STRUCT),
+	UDMA_DUMP_PREP_ATTR(m2s_q_pointers, UDMA_DUMP_M2S_Q_POINTERS),
+	UDMA_DUMP_PREP_ATTR(s2m_regs, UDMA_DUMP_S2M_REGS),
+	UDMA_DUMP_PREP_ATTR(s2m_q_struct, UDMA_DUMP_S2M_Q_STRUCT),
+	UDMA_DUMP_PREP_ATTR(s2m_q_pointers, UDMA_DUMP_S2M_Q_POINTERS)
+};
+
+static DEVICE_ATTR(stats_rst, 0660, rd_stats_rst, wr_stats_rst);
+#endif
+
+/******************************************************************************
+ *****************************************************************************/
+int al_dma_sysfs_init(
+	struct device *dev)
+{
+	int status = 0;
+
+#ifdef CONFIG_AL_DMA_STATS
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev_attr_stats_prep); i++) {
+		status = sysfs_create_file(
+			&dev->kobj,
+			&dev_attr_stats_prep[i].attr.attr);
+		if (status) {
+			dev_err(
+				dev,
+				"%s: sysfs_create_file(stats_prep %d) failed\n",
+				__func__,
+				i);
+			goto done;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dev_attr_stats_comp); i++) {
+		status = sysfs_create_file(
+			&dev->kobj,
&dev_attr_stats_comp[i].attr.attr); + if (status) { + dev_err( + dev, + "%s: sysfs_create_file(stats_comp %d) failed\n", + __func__, + i); + goto done; + } + } + + status = sysfs_create_file( + &dev->kobj, + &dev_attr_stats_rst.attr); + if (status) { + dev_err( + dev, + "%s: sysfs_create_file(stats_rst) failed\n", + __func__); + goto done; + } + + for (i = 0; i < ARRAY_SIZE(dev_attr_udma_debug); i++ ) { + status = sysfs_create_file( + &dev->kobj, + &dev_attr_udma_debug[i].attr.attr); + if (status) { + dev_err( + dev, + "%s: sysfs_create_file(stats_udma %d) failed\n", + __func__, + i); + goto done; + } + } +done: +#endif + + return status; +} + +/****************************************************************************** + *****************************************************************************/ +void al_dma_sysfs_terminate( + struct device *dev) +{ +#ifdef CONFIG_AL_DMA_STATS + int i; + + for (i = 0; i < ARRAY_SIZE(dev_attr_stats_prep); i++) + sysfs_remove_file( + &dev->kobj, + &dev_attr_stats_prep[i].attr.attr); + + for (i = 0; i < ARRAY_SIZE(dev_attr_stats_comp); i++) + sysfs_remove_file( + &dev->kobj, + &dev_attr_stats_comp[i].attr.attr); + + sysfs_remove_file(&dev->kobj, &dev_attr_stats_rst.attr); + + for (i = 0; i < ARRAY_SIZE(dev_attr_udma_debug); i++) { + sysfs_remove_file( + &dev->kobj, + &dev_attr_udma_debug[i].attr.attr); + } + +#endif +} + +#ifdef CONFIG_AL_DMA_STATS +/****************************************************************************** + *****************************************************************************/ +static ssize_t rd_stats_prep( + struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct al_dma_device *device = dev_get_drvdata(dev); + struct dev_ext_attribute *ea = to_ext_attr(attr); + uintptr_t offset = (uintptr_t)ea->var; + int i; + ssize_t size = 0; + + size += sprintf( + &buf[size], + "statistics - %s:\n", + attr->attr.name); + + for (i = 0; i < device->max_channels; i++) { + uint64_t val; + + spin_lock_bh(&device->channels[i]->prep_lock); + + val = *(uint64_t *)(((uint8_t*)&device->channels[i] + ->stats_prep) + offset); + + spin_unlock_bh(&device->channels[i]->prep_lock); + + size += sprintf( + &buf[size], + "chan[%d] %llu\n", + i, + val); + } + + return size; +} + +/****************************************************************************** + *****************************************************************************/ +static ssize_t rd_stats_comp( + struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct al_dma_device *device = dev_get_drvdata(dev); + struct dev_ext_attribute *ea = to_ext_attr(attr); + uintptr_t offset = (uintptr_t)ea->var; + int i; + ssize_t size = 0; + + size += sprintf( + &buf[size], + "statistics - %s:\n", + attr->attr.name); + + for (i = 0; i < device->max_channels; i++) { + uint64_t val; + + spin_lock_bh(&device->channels[i]->cleanup_lock); + + val = *(uint64_t *)(((uint8_t*)&device->channels[i] + ->stats_comp) + offset); + + spin_unlock_bh(&device->channels[i]->cleanup_lock); + + size += sprintf( + &buf[size], + "chan[%d] %llu\n", + i, + val); + } + + return size; +} + +/****************************************************************************** + *****************************************************************************/ +static ssize_t rd_stats_rst( + struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf( + buf, + "Write anything to clear all statistics\n"); +} + 
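/*
 * The attribute tables above surface one read-only file per counter (e.g.
 * stats_prep_memcpy_num), each listing a value per channel, while the
 * read/write stats_rst file defined next clears both the prep and the
 * completion counter blocks for every channel.
 */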
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t wr_stats_rst(
+	struct device *dev,
+	struct device_attribute *attr,
+	const char *buf,
+	size_t count)
+{
+	struct al_dma_device *device = dev_get_drvdata(dev);
+
+	int i;
+
+	for (i = 0; i < device->max_channels; i++) {
+		spin_lock_bh(&device->channels[i]->prep_lock);
+		spin_lock_bh(&device->channels[i]->cleanup_lock);
+
+		memset(
+			&device->channels[i]->stats_prep,
+			0,
+			sizeof(struct al_dma_chan_stats_prep));
+
+		memset(
+			&device->channels[i]->stats_comp,
+			0,
+			sizeof(struct al_dma_chan_stats_comp));
+
+		spin_unlock_bh(&device->channels[i]->cleanup_lock);
+		spin_unlock_bh(&device->channels[i]->prep_lock);
+	}
+
+	/* consume the whole write so userspace does not retry */
+	return count;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t rd_udma_dump(
+	struct device *dev,
+	struct device_attribute *attr,
+	char *buf)
+{
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	enum udma_dump_type dump_type = (enum udma_dump_type)ea->var;
+	ssize_t rc = 0;
+
+	switch (dump_type) {
+	case UDMA_DUMP_M2S_REGS:
+	case UDMA_DUMP_S2M_REGS:
+		rc = sprintf(
+			buf,
+			"Write mask to dump corresponding udma regs\n");
+		break;
+	case UDMA_DUMP_M2S_Q_STRUCT:
+	case UDMA_DUMP_S2M_Q_STRUCT:
+		rc = sprintf(
+			buf,
+			"Write q num to dump corresponding q struct\n");
+		break;
+	case UDMA_DUMP_M2S_Q_POINTERS:
+	case UDMA_DUMP_S2M_Q_POINTERS:
+		rc = sprintf(
+			buf,
+			"Write q num (in hex); set bit 4 (0x10) for the"
+			" submission ring, e.g.:\n"
+			"0 for completion ring of q 0\n"
+			"10 for submission ring of q 0\n");
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t wr_udma_dump(
+	struct device *dev,
+	struct device_attribute *attr,
+	const char *buf,
+	size_t count)
+{
+	int err;
+	int q_id;
+	unsigned long val;
+	struct al_udma *dma;
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	enum udma_dump_type dump_type = (enum udma_dump_type)ea->var;
+	enum al_udma_ring_type ring_type = AL_RING_COMPLETION;
+	struct al_dma_device *device = dev_get_drvdata(dev);
+
+	err = kstrtoul(buf, 16, &val);
+	if (err < 0)
+		return err;
+
+	switch (dump_type) {
+	case UDMA_DUMP_M2S_REGS:
+		al_ssm_dma_handle_get(&device->hal_raid, UDMA_TX, &dma);
+		al_udma_regs_print(dma, val);
+		break;
+	case UDMA_DUMP_S2M_REGS:
+		al_ssm_dma_handle_get(&device->hal_raid, UDMA_RX, &dma);
+		al_udma_regs_print(dma, val);
+		break;
+	case UDMA_DUMP_M2S_Q_STRUCT:
+		al_ssm_dma_handle_get(&device->hal_raid, UDMA_TX, &dma);
+		al_udma_q_struct_print(dma, val);
+		break;
+	case UDMA_DUMP_S2M_Q_STRUCT:
+		al_ssm_dma_handle_get(&device->hal_raid, UDMA_RX, &dma);
+		al_udma_q_struct_print(dma, val);
+		break;
+	case UDMA_DUMP_M2S_Q_POINTERS:
+		if (val & 0x10)
+			ring_type = AL_RING_SUBMISSION;
+		q_id = val & 0xf;
+		al_ssm_dma_handle_get(&device->hal_raid, UDMA_TX, &dma);
+		al_udma_ring_print(dma, q_id, ring_type);
+		break;
+	case UDMA_DUMP_S2M_Q_POINTERS:
+		if (val & 0x10)
+			ring_type = AL_RING_SUBMISSION;
+		q_id = val & 0xf;
+		al_ssm_dma_handle_get(&device->hal_raid, UDMA_RX, &dma);
+		al_udma_ring_print(dma, q_id, ring_type);
+		break;
+	default:
+		break;
+	}
+
+	return count;
+}
+#endif
+
diff --git
a/target/linux/alpine/files/drivers/dma/al/al_dma_sysfs.h b/target/linux/alpine/files/drivers/dma/al/al_dma_sysfs.h new file mode 100644 index 00000000000000..f3733f221e99e9 --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_dma_sysfs.h @@ -0,0 +1,28 @@ +/* + * Annapurna Labs DMA Linux driver - sysfs support declarations + * Copyright(c) 2011 Annapurna Labs. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +int al_dma_sysfs_init( + struct device *dev); + +void al_dma_sysfs_terminate( + struct device *dev); + diff --git a/target/linux/alpine/files/drivers/dma/al/al_hal_ssm_raid.c b/target/linux/alpine/files/drivers/dma/al/al_hal_ssm_raid.c new file mode 100644 index 00000000000000..e3b7e50a280a65 --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_hal_ssm_raid.c @@ -0,0 +1,578 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
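Before the tables that follow: gflog[] and gfilog[] are the discrete log/antilog tables of GF(2^8) under the RAID-6 polynomial 0x11d, which is what gives the Q-parity coefficients their meaning. A software multiply over that field, using the tables defined just below (a sketch, not part of the HAL):

#include <linux/types.h>

/* GF(2^8) multiply via the log tables: log(a*b) = (log a + log b) mod 255.
 * gflog[0] holds the 0xff sentinel (log(0) is undefined), so zero operands
 * are handled up front. */
static inline u8 gf_mul(u8 a, u8 b)
{
	if (a == 0 || b == 0)
		return 0;
	return gfilog[(gflog[a] + gflog[b]) % 255];
}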
+ +*******************************************************************************/ + +/** + * @{ + * @file al_hal_ssm_raid.c + * + */ + +#include +#include "al_hal_ssm_raid.h" +#include "al_hal_ssm_raid_regs.h" + +#define RX_COMP_STATUS_MASK \ + (AL_RAID_P_VAL_ERROR | AL_RAID_Q_VAL_ERROR |\ + AL_RAID_BUS_PARITY_ERROR | AL_RAID_SOURCE_LEN_ERROR |\ + AL_RAID_CMD_DECODE_ERROR | AL_RAID_INTERNAL_ERROR |\ + AL_RAID_REDIRECTED_TRANSACTION |\ + AL_RAID_REDIRECTED_SRC_UDMA |\ + AL_RAID_REDIRECTED_SRC_QUEUE) + +/** operation attributes */ +struct al_op_attr { + uint32_t opcode; /* hw opcode */ + uint32_t meta_descs; /* number of meta descs needed for the operation */ + al_bool have_g_coef; + al_bool have_p_coef; +}; + +static const struct al_op_attr op_attr_table[] = { + /* opcode meta g coef p coef */ + {AL_RAID_MEM_CPY_OPCODE, 0, AL_FALSE, AL_FALSE}, + {AL_RAID_MEM_SET_OPCODE, 1, AL_FALSE, AL_FALSE}, + {AL_RAID_MEM_SCRUB_OPCODE, 0, AL_FALSE, AL_FALSE}, + {AL_RAID_MEM_CMP_OPCODE, 4, AL_FALSE, AL_FALSE}, + {AL_RAID_NOP_OPCODE, 1, AL_FALSE, AL_FALSE}, + {AL_RAID_P_CALC_OPCODE, 0, AL_FALSE, AL_TRUE}, + {AL_RAID_Q_CALC_OPCODE, 0, AL_TRUE, AL_FALSE}, + {AL_RAID_PQ_CALC_OPCODE, 0, AL_TRUE, AL_TRUE}, + {AL_RAID_P_VAL_OPCODE, 0, AL_FALSE, AL_TRUE}, + {AL_RAID_Q_VAL_OPCODE, 0, AL_TRUE, AL_FALSE}, + {AL_RAID_PQ_VAL_OPCODE, 0, AL_TRUE, AL_TRUE}, +}; + +#define GF_SIZE 256 + +static const uint8_t gflog[GF_SIZE] = { + 0xff, 0x00, 0x01, 0x19, 0x02, 0x32, 0x1a, 0xc6, + 0x03, 0xdf, 0x33, 0xee, 0x1b, 0x68, 0xc7, 0x4b, + 0x04, 0x64, 0xe0, 0x0e, 0x34, 0x8d, 0xef, 0x81, + 0x1c, 0xc1, 0x69, 0xf8, 0xc8, 0x08, 0x4c, 0x71, + 0x05, 0x8a, 0x65, 0x2f, 0xe1, 0x24, 0x0f, 0x21, + 0x35, 0x93, 0x8e, 0xda, 0xf0, 0x12, 0x82, 0x45, + 0x1d, 0xb5, 0xc2, 0x7d, 0x6a, 0x27, 0xf9, 0xb9, + 0xc9, 0x9a, 0x09, 0x78, 0x4d, 0xe4, 0x72, 0xa6, + 0x06, 0xbf, 0x8b, 0x62, 0x66, 0xdd, 0x30, 0xfd, + 0xe2, 0x98, 0x25, 0xb3, 0x10, 0x91, 0x22, 0x88, + 0x36, 0xd0, 0x94, 0xce, 0x8f, 0x96, 0xdb, 0xbd, + 0xf1, 0xd2, 0x13, 0x5c, 0x83, 0x38, 0x46, 0x40, + 0x1e, 0x42, 0xb6, 0xa3, 0xc3, 0x48, 0x7e, 0x6e, + 0x6b, 0x3a, 0x28, 0x54, 0xfa, 0x85, 0xba, 0x3d, + 0xca, 0x5e, 0x9b, 0x9f, 0x0a, 0x15, 0x79, 0x2b, + 0x4e, 0xd4, 0xe5, 0xac, 0x73, 0xf3, 0xa7, 0x57, + 0x07, 0x70, 0xc0, 0xf7, 0x8c, 0x80, 0x63, 0x0d, + 0x67, 0x4a, 0xde, 0xed, 0x31, 0xc5, 0xfe, 0x18, + 0xe3, 0xa5, 0x99, 0x77, 0x26, 0xb8, 0xb4, 0x7c, + 0x11, 0x44, 0x92, 0xd9, 0x23, 0x20, 0x89, 0x2e, + 0x37, 0x3f, 0xd1, 0x5b, 0x95, 0xbc, 0xcf, 0xcd, + 0x90, 0x87, 0x97, 0xb2, 0xdc, 0xfc, 0xbe, 0x61, + 0xf2, 0x56, 0xd3, 0xab, 0x14, 0x2a, 0x5d, 0x9e, + 0x84, 0x3c, 0x39, 0x53, 0x47, 0x6d, 0x41, 0xa2, + 0x1f, 0x2d, 0x43, 0xd8, 0xb7, 0x7b, 0xa4, 0x76, + 0xc4, 0x17, 0x49, 0xec, 0x7f, 0x0c, 0x6f, 0xf6, + 0x6c, 0xa1, 0x3b, 0x52, 0x29, 0x9d, 0x55, 0xaa, + 0xfb, 0x60, 0x86, 0xb1, 0xbb, 0xcc, 0x3e, 0x5a, + 0xcb, 0x59, 0x5f, 0xb0, 0x9c, 0xa9, 0xa0, 0x51, + 0x0b, 0xf5, 0x16, 0xeb, 0x7a, 0x75, 0x2c, 0xd7, + 0x4f, 0xae, 0xd5, 0xe9, 0xe6, 0xe7, 0xad, 0xe8, + 0x74, 0xd6, 0xf4, 0xea, 0xa8, 0x50, 0x58, 0xaf +}; + +static const uint8_t gfilog[GF_SIZE] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x1d, 0x3a, 0x74, 0xe8, 0xcd, 0x87, 0x13, 0x26, + 0x4c, 0x98, 0x2d, 0x5a, 0xb4, 0x75, 0xea, 0xc9, + 0x8f, 0x03, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, + 0x9d, 0x27, 0x4e, 0x9c, 0x25, 0x4a, 0x94, 0x35, + 0x6a, 0xd4, 0xb5, 0x77, 0xee, 0xc1, 0x9f, 0x23, + 0x46, 0x8c, 0x05, 0x0a, 0x14, 0x28, 0x50, 0xa0, + 0x5d, 0xba, 0x69, 0xd2, 0xb9, 0x6f, 0xde, 0xa1, + 0x5f, 0xbe, 0x61, 0xc2, 0x99, 0x2f, 0x5e, 0xbc, + 0x65, 0xca, 0x89, 0x0f, 0x1e, 0x3c, 
0x78, 0xf0,
+	0xfd, 0xe7, 0xd3, 0xbb, 0x6b, 0xd6, 0xb1, 0x7f,
+	0xfe, 0xe1, 0xdf, 0xa3, 0x5b, 0xb6, 0x71, 0xe2,
+	0xd9, 0xaf, 0x43, 0x86, 0x11, 0x22, 0x44, 0x88,
+	0x0d, 0x1a, 0x34, 0x68, 0xd0, 0xbd, 0x67, 0xce,
+	0x81, 0x1f, 0x3e, 0x7c, 0xf8, 0xed, 0xc7, 0x93,
+	0x3b, 0x76, 0xec, 0xc5, 0x97, 0x33, 0x66, 0xcc,
+	0x85, 0x17, 0x2e, 0x5c, 0xb8, 0x6d, 0xda, 0xa9,
+	0x4f, 0x9e, 0x21, 0x42, 0x84, 0x15, 0x2a, 0x54,
+	0xa8, 0x4d, 0x9a, 0x29, 0x52, 0xa4, 0x55, 0xaa,
+	0x49, 0x92, 0x39, 0x72, 0xe4, 0xd5, 0xb7, 0x73,
+	0xe6, 0xd1, 0xbf, 0x63, 0xc6, 0x91, 0x3f, 0x7e,
+	0xfc, 0xe5, 0xd7, 0xb3, 0x7b, 0xf6, 0xf1, 0xff,
+	0xe3, 0xdb, 0xab, 0x4b, 0x96, 0x31, 0x62, 0xc4,
+	0x95, 0x37, 0x6e, 0xdc, 0xa5, 0x57, 0xae, 0x41,
+	0x82, 0x19, 0x32, 0x64, 0xc8, 0x8d, 0x07, 0x0e,
+	0x1c, 0x38, 0x70, 0xe0, 0xdd, 0xa7, 0x53, 0xa6,
+	0x51, 0xa2, 0x59, 0xb2, 0x79, 0xf2, 0xf9, 0xef,
+	0xc3, 0x9b, 0x2b, 0x56, 0xac, 0x45, 0x8a, 0x09,
+	0x12, 0x24, 0x48, 0x90, 0x3d, 0x7a, 0xf4, 0xf5,
+	0xf7, 0xf3, 0xfb, 0xeb, 0xcb, 0x8b, 0x0b, 0x16,
+	0x2c, 0x58, 0xb0, 0x7d, 0xfa, 0xe9, 0xcf, 0x83,
+	0x1b, 0x36, 0x6c, 0xd8, 0xad, 0x47, 0x8e, 0x00
+};
+
+static void _al_raid_load_table(
+	struct raid_accelerator_regs __iomem *app_regs,
+	int gflog,
+	const uint8_t *table)
+{
+	uint32_t *base_reg;
+	int i;
+
+	if (gflog)
+		base_reg = &app_regs->gflog_table[0].w0_raw;
+	else
+		base_reg = &app_regs->gfilog_table[0].w0_r;
+
+	for (i = 0; i < GF_SIZE/4; i++) {
+		int table_idx = i << 2; /* *4 */
+		uint32_t reg = (table[table_idx + 3] << 24) |
+			(table[table_idx + 2] << 16) |
+			(table[table_idx + 1] << 8) |
+			table[table_idx];
+
+		al_reg_write32(base_reg + i, reg);
+	}
+}
+
+/**
+ * get number of rx submission descriptors needed for the transaction
+ *
+ * we need an rx descriptor for each destination buffer.
+ * if the transaction doesn't have destination buffers, then one
+ * descriptor is needed
+ *
+ * @param xaction transaction context
+ *
+ * @return number of rx submission descriptors
+ */
+INLINE uint32_t _al_raid_xaction_rx_descs_count(
+	struct al_raid_transaction *xaction)
+{
+	return xaction->total_dst_bufs ? xaction->total_dst_bufs : 1;
+}
+
+/**
+ * get number of tx submission descriptors needed for the transaction
+ *
+ * we need a tx descriptor for each source buffer.
+ * MEM_SET needs 1 meta descriptor, MEM_CMP needs 4,
+ * and we need at least one descriptor for the opcode
+ *
+ * @param xaction transaction context
+ * @param meta number of meta descriptors
+ *
+ * @return number of tx submission descriptors
+ */
+INLINE uint32_t _al_raid_xaction_tx_descs_count(
+	struct al_raid_transaction *xaction,
+	uint32_t meta)
+{
+	uint32_t count = xaction->total_src_bufs + meta;
+	return count ?
count : 1; +} + +/** + * prepare the rx submission descriptors + * this function writes the contents of the rx submission descriptors + * @param rx_udma_q rx udma handle + * @param xaction transaction context + */ +static void _al_raid_set_rx_descs( + struct al_udma_q *rx_udma_q, + struct al_raid_transaction *xaction) +{ + uint32_t flags = 0; + union al_udma_desc *rx_desc; + struct al_block *block; + uint32_t blk_idx; + + if (xaction->flags & AL_SSM_INTERRUPT) + flags = AL_M2S_DESC_INT_EN; + if (xaction->flags & AL_SSM_DEST_NO_SNOOP) + flags |= AL_M2S_DESC_NO_SNOOP_H; + + /* if the xaction doesn't have destination buffers, allocate single + Meta descriptor */ + if (xaction->total_dst_bufs == 0) { + rx_desc = al_udma_desc_get(rx_udma_q); + flags |= al_udma_ring_id_get(rx_udma_q) << + AL_M2S_DESC_RING_ID_SHIFT; + flags |= AL_RAID_RX_DESC_META; + /* write back flags */ + rx_desc->rx.len_ctrl = swap32_to_le(flags); + return; + } + + /* for each desc set buffer length, address */ + block = xaction->dsts_blocks; + for (blk_idx = 0; blk_idx < xaction->num_of_dsts; blk_idx++) { + struct al_buf *buf = block->bufs; + unsigned int buf_idx = 0; + for (; buf_idx < block->num; buf_idx++) { + uint64_t vmid = ((uint64_t)block->vmid) << + AL_UDMA_DESC_VMID_SHIFT; + uint32_t flags_len = flags; + uint32_t ring_id; + + rx_desc = al_udma_desc_get(rx_udma_q); + /* get ring id */ + ring_id = al_udma_ring_id_get(rx_udma_q) + << AL_M2S_DESC_RING_ID_SHIFT; + + flags_len |= ring_id; + + flags_len |= buf->len & AL_M2S_DESC_LEN_MASK; + rx_desc->rx.len_ctrl = swap32_to_le(flags_len); + rx_desc->rx.buf1_ptr = swap64_to_le(buf->addr | vmid); + buf++; + } + block++; + } +} + +/** + * calculate the total length of rx buffers + * + * @param xaction transaction context + */ +static uint32_t _al_raid_rx_get_len(struct al_raid_transaction *xaction) +{ + uint32_t total_len = 0; + uint32_t blk_idx; + struct al_block *block = xaction->dsts_blocks; + + for (blk_idx = 0; blk_idx < xaction->num_of_dsts; blk_idx++) { + struct al_buf *buf = block->bufs; + unsigned int buf_idx = 0; + for (; buf_idx < block->num; buf_idx++) { + total_len += buf->len; + buf++; + } + } + return total_len; +} + +/** + * fill the tx submission descriptors + * this function writes the contents of the tx submission descriptors + * @param tx_udma_q tx udma handle + * @param xaction transaction context + * @param meta number of meta descriptors used by this xaction + * @param op_attr operation attributes + */ +static void _al_raid_set_tx_descs( + struct al_udma_q *tx_udma_q, + struct al_raid_transaction *xaction, + uint32_t meta, + const struct al_op_attr *op_attr) +{ + union al_udma_desc *tx_desc; + uint32_t flags = AL_M2S_DESC_FIRST; + struct al_block *block; + uint32_t blk_idx; + + + for (blk_idx = 0; blk_idx < meta; blk_idx++) { + uint32_t flags_len = flags; + uint32_t ring_id; + + /* clear first flags */ + flags = 0; + + /* get next descriptor */ + tx_desc = al_udma_desc_get(tx_udma_q); + /* get ring id */ + ring_id = al_udma_ring_id_get(tx_udma_q) << + AL_M2S_DESC_RING_ID_SHIFT; + + flags_len |= ring_id; + + /* write descriptor's flags */ + flags_len |= AL_M2S_DESC_META_DATA; + + /* set LAST flag */ + if ((blk_idx == (meta - 1)) && (xaction->num_of_srcs == 0)) { + flags_len |= AL_M2S_DESC_LAST; + if (xaction->flags & AL_SSM_BARRIER) + flags_len |= AL_M2S_DESC_DMB; + } + + if (blk_idx == 0) { + uint32_t attr = op_attr->opcode; + + if (xaction->op == AL_RAID_OP_MEM_SET) + attr |= xaction->mem_set_flags; + + /* write opcode in first descriptor */ + 
tx_desc->tx.meta_ctrl = swap32_to_le(attr); + } + /* write meta data */ + if (xaction->op == AL_RAID_OP_MEM_SET) { + /* memset needs length in meta desc */ + uint32_t total_len = _al_raid_rx_get_len(xaction); + flags_len |= total_len & AL_M2S_DESC_LEN_MASK; + tx_desc->tx_meta.meta1 = xaction->data[1]; + tx_desc->tx_meta.meta2 = xaction->data[0]; + } else if (xaction->op == AL_RAID_OP_MEM_CMP) { + tx_desc->tx_meta.meta1 = xaction->pattern_data[blk_idx]; + tx_desc->tx_meta.meta2 = xaction->pattern_mask[blk_idx]; + } + tx_desc->tx.len_ctrl = swap32_to_le(flags_len); + } + + if (xaction->flags & AL_SSM_SRC_NO_SNOOP) + flags |= AL_M2S_DESC_NO_SNOOP_H; + if (xaction->flags & AL_SSM_BARRIER) + flags |= AL_M2S_DESC_DMB; + + /* for each desc set buffer length, address */ + block = xaction->srcs_blocks; + for (blk_idx = 0; blk_idx < xaction->num_of_srcs; blk_idx++) { + uint32_t attr = op_attr->opcode; + struct al_buf *buf = block->bufs; + unsigned int buf_idx = 0; + + attr &= ~0xFFFFF; + if (op_attr->have_g_coef == AL_TRUE) + attr |= xaction->coefs[blk_idx] & 0xFF; + if (blk_idx == 0) + attr |= AL_RAID_TX_DESC_META_FIRST_SOURCE; + if (blk_idx == (xaction->num_of_srcs - 1)) + attr |= AL_RAID_TX_DESC_META_LAST_SOURCE; + if (op_attr->have_p_coef == AL_TRUE) + if ((xaction->op != AL_RAID_OP_PQ_VAL) || + (blk_idx != xaction->q_index)) + attr |= AL_RAID_TX_DESC_META_P_ENABLE; + + for (buf_idx = 0; buf_idx < block->num; buf_idx++) { + uint64_t vmid = ((uint64_t)block->vmid) << + AL_UDMA_DESC_VMID_SHIFT; + uint32_t flags_len = flags; + uint32_t ring_id; + + /* clear first and DMB flags, keep no snoop hint flag */ + flags &= AL_M2S_DESC_NO_SNOOP_H; + + tx_desc = al_udma_desc_get(tx_udma_q); + /* get ring id, and clear FIRST and Int flags */ + ring_id = al_udma_ring_id_get(tx_udma_q) << + AL_M2S_DESC_RING_ID_SHIFT; + + flags_len |= ring_id; + /* set LAST flag if last descriptor */ + if ((blk_idx == (xaction->num_of_srcs - 1)) && + (buf_idx == (block->num - 1))) + flags_len |= AL_M2S_DESC_LAST; + + flags_len |= buf->len & AL_M2S_DESC_LEN_MASK; + + if (buf_idx == 0) { + /* write attributes for descriptors */ + /* that start new source */ + tx_desc->tx.meta_ctrl = swap32_to_le(attr); + } else { + flags_len |= AL_M2S_DESC_CONCAT; + } + tx_desc->tx.len_ctrl = swap32_to_le(flags_len); + tx_desc->tx.buf_ptr = swap64_to_le(buf->addr | vmid); + /* move to next buffer/descriptor */ + buf++; + } + block++; + } +} + +/****************************** API functions *********************************/ +void al_raid_init(void __iomem *app_regs) +{ + /* initialize the GFLOG and GFILOG tables of the hw */ + _al_raid_load_table(app_regs, 1, gflog); + _al_raid_load_table(app_regs, 0, gfilog); +} + +/** + * prepare raid transaction + * + * @param raid_dma raid DMA handle + * @param qid queue index + * @param xaction transaction context + * + * @return 0 if no error found. + * -ENOSPC if no space available. 
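+ *
+ * A minimal usage sketch (illustrative only; assumes raid_dma, qid and
+ * the transaction fields were set up by the caller):
+ *
+ *	rc = al_raid_dma_prepare(raid_dma, qid, &xaction);
+ *	if (!rc)
+ *		rc = al_raid_dma_action(raid_dma, qid, xaction.tx_descs_count);
+ *	...
+ *	if (al_raid_dma_completion(raid_dma, qid, &comp_status))
+ *		... a transaction completed; comp_status holds its status ...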
+ */
+int al_raid_dma_prepare(
+	struct al_ssm_dma *raid_dma,
+	uint32_t qid,
+	struct al_raid_transaction *xaction)
+{
+	uint32_t rx_descs;
+	uint32_t tx_descs, meta;
+	struct al_udma_q *rx_udma_q;
+	struct al_udma_q *tx_udma_q;
+	const struct al_op_attr *op_attr;
+	int rc;
+
+	/* assert valid opcode */
+	al_assert(xaction->op <
+		(sizeof(op_attr_table)/sizeof(op_attr_table[0])));
+
+	op_attr = &(op_attr_table[xaction->op]);
+
+	/* calc rx (S2M) descriptors */
+	rx_descs = _al_raid_xaction_rx_descs_count(xaction);
+	al_assert(rx_descs <= AL_SSM_MAX_DST_DESCS);
+	rc = al_udma_q_handle_get(&raid_dma->m2m_udma.rx_udma, qid, &rx_udma_q);
+
+	al_assert(rc == 0); /* assert valid rx q handle */
+
+	if (unlikely(al_udma_available_get(rx_udma_q) < rx_descs)) {
+		al_dbg("raid [%s]: rx q doesn't have enough free descriptors",
+			raid_dma->m2m_udma.name);
+		return -ENOSPC;
+	}
+
+	/* calc tx (M2S) descriptors */
+	meta = op_attr->meta_descs;
+	tx_descs = _al_raid_xaction_tx_descs_count(xaction, meta);
+	al_assert(tx_descs <= AL_SSM_MAX_SRC_DESCS);
+	rc = al_udma_q_handle_get(&raid_dma->m2m_udma.tx_udma, qid, &tx_udma_q);
+	al_assert(rc == 0); /* assert valid tx q handle */
+	if (unlikely(al_udma_available_get(tx_udma_q) < tx_descs)) {
+		al_dbg("raid [%s]: tx q doesn't have enough free descriptors",
+			raid_dma->m2m_udma.name);
+		return -ENOSPC;
+	}
+
+	/* prepare rx descs */
+	_al_raid_set_rx_descs(rx_udma_q, xaction);
+	/* add rx descriptors */
+	al_udma_desc_action_add(rx_udma_q, rx_descs);
+
+	/* prepare tx descriptors */
+	_al_raid_set_tx_descs(tx_udma_q, xaction, meta, op_attr);
+	/* save the tx descriptors count; the tx descriptors are handed to
+	   the hw engine later, by al_raid_dma_action() */
+	xaction->tx_descs_count = tx_descs;
+
+	return 0;
+}
+EXPORT_SYMBOL(al_raid_dma_prepare);
+
+/**
+ * add previously prepared transaction to hw engine
+ *
+ * @param raid_dma raid DMA handle
+ * @param qid queue index
+ * @param tx_descs number of tx descriptors to be processed by the engine
+ *
+ * @return 0 if no error found.
+ */
+int al_raid_dma_action(
+	struct al_ssm_dma *raid_dma,
+	uint32_t qid,
+	uint32_t tx_descs)
+{
+	struct al_udma_q *tx_udma_q;
+	int rc;
+
+	rc = al_udma_q_handle_get(&raid_dma->m2m_udma.tx_udma, qid, &tx_udma_q);
+	al_assert(rc == 0); /* assert valid tx q handle */
+
+	al_udma_desc_action_add(tx_udma_q, tx_descs);
+
+	return 0;
+}
+EXPORT_SYMBOL(al_raid_dma_action);
+
+/**
+ * check and cleanup completed transaction
+ *
+ * @param raid_dma raid DMA handle
+ * @param qid queue index
+ * @param comp_status status reported by rx completion descriptor
+ *
+ * @return 1 if a transaction was completed. 0 otherwise
+ */
+int al_raid_dma_completion(
+	struct al_ssm_dma *raid_dma,
+	uint32_t qid,
+	uint32_t *comp_status)
+{
+	struct al_udma_q *rx_udma_q;
+	struct al_udma_q *tx_udma_q;
+	volatile union al_udma_cdesc *cdesc;
+	int rc;
+	uint32_t cdesc_count;
+
+	rc = al_udma_q_handle_get(&raid_dma->m2m_udma.rx_udma, qid, &rx_udma_q);
+	al_assert(rc == 0); /* assert valid rx q handle */
+
+	cdesc_count = al_udma_cdesc_packet_get(rx_udma_q, &cdesc);
+	if (cdesc_count == 0)
+		return 0;
+
+	/* if we have multiple completion descriptors, then the last one will
+	   have the valid status */
+	if (unlikely(cdesc_count > 1))
+		cdesc = al_cdesc_next(rx_udma_q, cdesc, cdesc_count - 1);
+
+	*comp_status = swap32_from_le(cdesc->al_desc_comp_rx.ctrl_meta) &
+		RX_COMP_STATUS_MASK;
+
+	al_dbg(
+		"raid [%s %d]: packet completed. 
" + "count %d status desc %p meta %x\n", + raid_dma->m2m_udma.name, qid, cdesc_count, cdesc, + cdesc->al_desc_comp_rx.ctrl_meta); + + al_udma_cdesc_ack(rx_udma_q, cdesc_count); + + /* cleanup tx completion queue */ + rc = al_udma_q_handle_get(&raid_dma->m2m_udma.tx_udma, qid, &tx_udma_q); + al_assert(rc == 0); /* assert valid tx q handle */ + + cdesc_count = al_udma_cdesc_get_all(tx_udma_q, NULL); + if (cdesc_count != 0) + al_udma_cdesc_ack(tx_udma_q, cdesc_count); + + return 1; +} +EXPORT_SYMBOL(al_raid_dma_completion); +/** @} end of RAID group */ diff --git a/target/linux/alpine/files/drivers/dma/al/al_hal_ssm_raid.h b/target/linux/alpine/files/drivers/dma/al/al_hal_ssm_raid.h new file mode 100644 index 00000000000000..fb3c10f5811fd0 --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_hal_ssm_raid.h @@ -0,0 +1,160 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*******************************************************************************/ + +/** + * @defgroup group_raid_api API + * @ingroup group_raid + * RAID API + * @{ + * @file al_hal_ssm_raid.h + * + * @brief Header file for RAID acceleration unit HAL driver + * + */ + +#ifndef __AL_HAL_RAID_H__ +#define __AL_HAL_RAID_H__ + +#include +#include +#include +#include + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +/* PCI Adapter Device/Revision ID */ +#define AL_RAID_DEV_ID 0x0021 +#define AL_RAID_REV_ID_0 0 +#define AL_RAID_REV_ID_1 1 + +enum al_raid_op { + AL_RAID_OP_MEM_CPY = 0, /* memory copy */ + AL_RAID_OP_MEM_SET, /* memory set with 64b data */ + AL_RAID_OP_MEM_SCRUB, /* memory read */ + AL_RAID_OP_MEM_CMP, /* compare block with 128 bit pattern */ + AL_RAID_OP_NOP, /* no-operation */ + AL_RAID_OP_P_CALC, /* RAID5/6 Parity (xor) calculation */ + AL_RAID_OP_Q_CALC, /* RAID6 Q calculation */ + AL_RAID_OP_PQ_CALC, /* P and Q calculation */ + AL_RAID_OP_P_VAL, /* Parity validation */ + AL_RAID_OP_Q_VAL, /* Q validation */ + AL_RAID_OP_PQ_VAL, /* P and Q validation */ +}; + + +#define AL_RAID_RX_DESC_META (1<<30) /* Meta data */ + +#define AL_RAID_TX_DESC_META_OPCODE_MASK (0x1f<<20) /* RAID_op & type combined */ +#define AL_RAID_TX_DESC_META_OPCODE_SHIFT (20) +#define AL_RAID_TX_DESC_META_FIRST_SOURCE (1<<19) /*TODO beginning of 1st block */ +#define AL_RAID_TX_DESC_META_LAST_SOURCE (1<<18) /* beginning of last block */ +#define AL_RAID_TX_DESC_META_P_ENABLE (1<<17) /* P calculation Coef */ + +/* define the HW opcode with the needed shift, also the code combines the */ +/* op class (mem or raid) and type */ +#define AL_RAID_OPCODE(x) ((x) << AL_RAID_TX_DESC_META_OPCODE_SHIFT) +#define AL_RAID_MEM_CPY_OPCODE AL_RAID_OPCODE(0x0) +#define AL_RAID_MEM_SET_OPCODE AL_RAID_OPCODE(0x1) +#define AL_RAID_MEM_SCRUB_OPCODE AL_RAID_OPCODE(0x2) +#define AL_RAID_MEM_CMP_OPCODE AL_RAID_OPCODE(0x3) +#define AL_RAID_NOP_OPCODE AL_RAID_OPCODE(0x8) +#define AL_RAID_P_CALC_OPCODE AL_RAID_OPCODE(0x9) +#define AL_RAID_Q_CALC_OPCODE AL_RAID_OPCODE(0xa) +#define AL_RAID_PQ_CALC_OPCODE AL_RAID_OPCODE(0xb) +#define AL_RAID_P_VAL_OPCODE AL_RAID_OPCODE(0xd) +#define AL_RAID_Q_VAL_OPCODE AL_RAID_OPCODE(0xe) +#define AL_RAID_PQ_VAL_OPCODE AL_RAID_OPCODE(0xf) +#define AL_RAID_PARALLEL_MEM_CPY_OPCODE AL_RAID_OPCODE(0x10) + +/* transaction completion status */ +#define AL_RAID_P_VAL_ERROR AL_BIT(0) +#define AL_RAID_Q_VAL_ERROR AL_BIT(1) +#define AL_RAID_BUS_PARITY_ERROR AL_BIT(2) +#define AL_RAID_SOURCE_LEN_ERROR AL_BIT(3) +#define AL_RAID_CMD_DECODE_ERROR AL_BIT(4) +#define AL_RAID_INTERNAL_ERROR AL_BIT(5) + +#define AL_RAID_REDIRECTED_TRANSACTION AL_BIT(16) +#define AL_RAID_REDIRECTED_SRC_UDMA (AL_BIT(13) | AL_BIT(12)) +#define AL_RAID_REDIRECTED_SRC_QUEUE (AL_BIT(9) | AL_BIT(8)) + +struct al_raid_transaction { + enum al_raid_op op; + enum al_ssm_op_flags flags; + struct al_block *srcs_blocks; + uint32_t num_of_srcs; + uint32_t total_src_bufs; /* total number of buffers of all source blocks */ + struct al_block *dsts_blocks; + uint32_t num_of_dsts; + uint32_t total_dst_bufs; /* total number of buffers of all destination blocks */ + uint32_t tx_descs_count; /* number of tx descriptors created for this */ + /* transaction, this field set by the hal */ + + /* the following fields are operation specific */ + uint8_t *coefs; /* RAID6 Q coefficients of source blocks */ + uint8_t q_index; /* RAID6 PQ_VAL: index of q src block, the parity*/ + /* calculation will ignore that buffer */ + 
uint32_t data[2]; /* MEM SET data */ + + uint32_t mem_set_flags; /* MEM SET special flags, should be zero */ + uint32_t pattern_data[4]; /* MEM CMP pattern data */ + uint32_t pattern_mask[4]; /* MEM CMP pattern mask */ +}; + +/* Init RAID GFLOG and GFILOG tables */ +void al_raid_init(void __iomem *app_regs); + +/* prepare raid transaction */ +int al_raid_dma_prepare(struct al_ssm_dma *raid_dma, uint32_t qid, + struct al_raid_transaction *xaction); + +/* add previously prepared transaction to engine */ +int al_raid_dma_action(struct al_ssm_dma *raid_dma, uint32_t qid, + uint32_t tx_descs); + +/* get new completed transaction */ +int al_raid_dma_completion(struct al_ssm_dma *raid_dma, uint32_t qid, + uint32_t *comp_status); +/* *INDENT-OFF* */ +#ifdef __cplusplus +} +#endif +/* *INDENT-ON* */ +/** @} end of RAID group */ +#endif /* __AL_HAL_RAID_H__ */ diff --git a/target/linux/alpine/files/drivers/dma/al/al_hal_ssm_raid_regs.h b/target/linux/alpine/files/drivers/dma/al/al_hal_ssm_raid_regs.h new file mode 100644 index 00000000000000..c3cd18bb8a6e83 --- /dev/null +++ b/target/linux/alpine/files/drivers/dma/al/al_hal_ssm_raid_regs.h @@ -0,0 +1,239 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +#ifndef __AL_HAL_RAID_ACCELERATOR_REGS_H +#define __AL_HAL_RAID_ACCELERATOR_REGS_H + +#ifdef __cplusplus +extern "C" { +#endif +/* +* Unit Registers +*/ + +struct raid_accelerator_configuration { + uint32_t unit_conf; /* Unit configuration register */ + uint32_t rsrvd[3]; +}; + +struct raid_accelerator_error { + uint32_t unit_status; /* Error status registerSee mask_fatal_error register ... */ + uint32_t mask_fatal_error; /* Interrupt error configuration register: A bit in th ... 
*/
+	uint32_t rsrvd[2];
+};
+
+struct raid_accelerator_gflog_table {
+	uint32_t w0_raw; /* GFLOG Table Word0 */
+	uint32_t w1_raw; /* GFLOG Table Word1. R_n refers to row n in the table */
+	uint32_t w2_raw; /* GFLOG Table Word2. R_n refers to row n in the table */
+	uint32_t w3_raw; /* GFLOG Table Word3. R_n refers to row n in the table */
+};
+
+struct raid_accelerator_log {
+	uint32_t desc_word0; /* Descriptor word 0 */
+	uint32_t desc_word1; /* Descriptor word 1 */
+	uint32_t trans_info_1; /* Transaction Information of the command that trigger ... */
+	uint32_t trans_info_2; /* Transaction Information of the command that trigger ... */
+	uint32_t rsrvd[4];
+};
+
+struct raid_accelerator_gfilog_table {
+	uint32_t w0_r; /* GFILOG Table Word0 */
+	uint32_t w1_r; /* GFILOG Table Word1. R_n refers to row n in the table */
+	uint32_t w2_r; /* GFILOG Table Word2. R_n refers to row n in the table */
+	uint32_t w3_r; /* GFILOG Table Word3. R_n refers to row n in the table */
+};
+
+struct raid_accelerator_raid_status {
+	uint32_t rsrvd[1];
+	uint32_t status; /* RAID status */
+};
+
+struct raid_accelerator_raid_perf_counter {
+	uint32_t exec_cnt; /* The execution cycle counter. Measures number of cycle ... */
+	uint32_t m2s_active_cnt; /* M2S active cycles counter. Measures number of cycles M ... */
+	uint32_t m2s_idle_cnt; /* M2S idle cycles counter. Measures number of idle cycle ... */
+	uint32_t m2s_backp_cnt; /* M2S back pressure cycles counter. Measures number of ... */
+	uint32_t s2m_active_cnt; /* S2M active cycles counter. Measures number of cycles r ... */
+	uint32_t s2m_idle_cnt; /* S2M idle cycles counter. Measures number of idle cycle ... */
+	uint32_t s2m_backp_cnt; /* S2M backpressure counter. S2M backpressure cycles cou ... */
+	uint32_t cmd_dn_cnt; /* RAID Command Done counter. Total number of RAID comma ... */
+	uint32_t src_blocks_cnt; /* RAID Source Blocks counter. Total number of Source Bl ... */
+	uint32_t dst_blocks_cnt; /* RAID Destination Blocks counter. Total number of Dest ... */
+	uint32_t mem_cmd_dn_cnt; /* Memory Command Done counter. Total number of Non-RAID ... */
+	uint32_t recover_err_cnt; /* Recoverable Errors counter. Total number of recoverab ... */
+	uint32_t src_data_beats; /* Counts the number of data beats entering the RAID */
+	uint32_t dst_data_beats; /* Counts the number of data beats coming out of the RAI ... */
+	uint32_t rsrvd[6];
+};
+
+struct raid_accelerator_perfm_cnt_cntl {
+	uint32_t conf; /* Performance counter control */
+	uint32_t rsrvd[27];
+};
+
+struct raid_accelerator_regs {
+	struct raid_accelerator_configuration configuration;
+	struct raid_accelerator_error error;
+	struct raid_accelerator_log log;
+	struct raid_accelerator_raid_perf_counter raid_perf_counter;
+	struct raid_accelerator_perfm_cnt_cntl perfm_cnt_cntl;
+	struct raid_accelerator_gflog_table gflog_table[16];
+	struct raid_accelerator_gfilog_table gfilog_table[16];
+	struct raid_accelerator_raid_status raid_status;
+};
+
+/*
+* Registers Fields
+*/
+
+/**** unit_conf register ****/
+/* When this bit is set to 1, the raid engine accepts n ... */
+#define RAID_ACCELERATOR_CONFIGURATION_UNIT_CONF_MUL_CMD_EN (1 << 0)
+/* When this bit is set to 1, when an error occurs the pi ... */
+#define RAID_ACCELERATOR_CONFIGURATION_UNIT_CONF_HOLD_PIPE_WHEN_ERROR (1 << 1)
+/* When this bit is set to 1, reset the ack FIFO. 
*/
+#define RAID_ACCELERATOR_CONFIGURATION_UNIT_CONF_FIFO_ACK_ENABLE_MASK 0x0000007C
+#define RAID_ACCELERATOR_CONFIGURATION_UNIT_CONF_FIFO_ACK_ENABLE_SHIFT 2
+
+/**** unit_status register ****/
+/* Timeout on S2M */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_S2M_TOUT (1 << 0)
+/* Timeout on M2S */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_M2S_TOUT (1 << 1)
+/* Wrong/Unknown Command */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_CMD_DECODE_ERR (1 << 2)
+/* Multiple Source-Blocks that are not equal in size */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_BLOCK_SIZE_ERR (1 << 3)
+/* Wrong and illegal software configuration of the des ... */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_ILLEGAL_CONF (1 << 4)
+/* source length is bigger than 16Kbytes for p_only or ... */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_SOURCE_ABOVE_16K (1 << 5)
+/* source length is bigger than 8Kbytes for p&q operat ... */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_SOURCE_ABOVE_8K (1 << 6)
+/* Data read from internal memory has a parity error */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_INTERNAL_PARITY_ERR (1 << 7)
+/* Error received from M2S interface */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_M2S_ERR (1 << 8)
+/* Completion acknowledge FIFO overrun */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_ACK_FIFO_OVR_MASK 0x00003E00
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_ACK_FIFO_OVR_SHIFT 9
+/* Data FIFO (used in Q operation) overrun */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_Q_FIFO_OVR (1 << 14)
+/* EOP without SOP */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_EOP_WO_SOP (1 << 15)
+/* SOP without EOP */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_SOP_WO_EOP (1 << 16)
+/* SOP and EOP in the same cycle */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_SOP_EOP_SAME_CYCLE (1 << 17)
+/* Request from stream without SOP */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_REQ_VALID_WO_SOP (1 << 18)
+
+/**** mask_fatal_error register ****/
+/* Timeout on S2M */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_S2M_TOUT (1 << 0)
+/* Timeout on M2S */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_M2S_TOUT (1 << 1)
+/* Wrong/Unknown Command */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_CMD_DECODE_ERR (1 << 2)
+/* Multiple Source-Blocks that are not equal in size */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_BLOCK_SIZE_ERR (1 << 3)
+/* Wrong and illegal software configuration of the des ... */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_ILLEGAL_CONF (1 << 4)
+/* source length is bigger than 16Kbytes for p_only or ... */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_SOURCE_ABOVE_16K (1 << 5)
+/* source length is bigger than 8Kbytes for p&q operat ... 
*/
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_SOURCE_ABOVE_8K (1 << 6)
+/* Data read from internal memory has a parity error */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_INTERNAL_PARITY_ERR (1 << 7)
+/* Error received from M2S interface */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_M2S_ERR (1 << 8)
+/* Completion acknowledge FIFO overrun */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_ACK_FIFO_OVR_MASK 0x00003E00
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_ACK_FIFO_OVR_SHIFT 9
+/* Data FIFO (used in Q operation) overrun */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_Q_FIFO_OVR (1 << 14)
+/* EOP without SOP */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_EOP_WO_SOP (1 << 15)
+/* SOP without EOP */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_SOP_WO_EOP (1 << 16)
+/* SOP and EOP in the same cycle */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_SOP_EOP_SAME_CYCLE (1 << 17)
+/* Request from stream without SOP */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_REQ_VALID_WO_SOP (1 << 18)
+
+/**** trans_info_1 register ****/
+/* Transaction length in bytes */
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_1_TRANS_LEN_MASK 0x000FFFFF
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_1_TRANS_LEN_SHIFT 0
+/* Number of descriptors in the transaction */
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_1_NUM_OF_DESC_MASK 0x00F00000
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_1_NUM_OF_DESC_SHIFT 20
+/* Reserved */
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_1_RESERVED_MASK 0xFF000000
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_1_RESERVED_SHIFT 24
+
+/**** trans_info_2 register ****/
+/* Queue Number of the transaction */
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_Q_NUM_MASK 0x00000FFF
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_Q_NUM_SHIFT 0
+/* UDMA ID of the transaction */
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_DMA_ID_MASK 0x0000F000
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_DMA_ID_SHIFT 12
+/* Internal Serial Number of the transaction */
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_SERIAL_NUM_MASK 0x03FF0000
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_SERIAL_NUM_SHIFT 16
+/* Reserved */
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_RESERVED_MASK 0xFC000000
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_RESERVED_SHIFT 26
+
+/**** conf register ****/
+/* Does not affect the recover_err_cnt. 0: clear pe ... */
+#define RAID_ACCELERATOR_PERFM_CNT_CNTL_CONF_CONT_PERFORM_MASK 0x00000003
+#define RAID_ACCELERATOR_PERFM_CNT_CNTL_CONF_CONT_PERFORM_SHIFT 0
+
+/**** status register ****/
+/* Indicates when RAID is empty. 
*/
+#define RAID_ACCELERATOR_RAID_STATUS_STATUS_RAID_EMPTY (1 << 0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_RAID_ACCELERATOR_REGS_H */
diff --git a/target/linux/alpine/files/drivers/edac/al/Kconfig b/target/linux/alpine/files/drivers/edac/al/Kconfig
new file mode 100644
index 00000000000000..7645d4b1e46047
--- /dev/null
+++ b/target/linux/alpine/files/drivers/edac/al/Kconfig
@@ -0,0 +1,6 @@
+config EDAC_AL_MC
+	tristate "Annapurna Labs Memory Controller"
+	depends on ARCH_ALPINE
+	help
+	  Support for error detection and correction for the Annapurna
+	  Labs Alpine chipset.
diff --git a/target/linux/alpine/files/drivers/edac/al/Makefile b/target/linux/alpine/files/drivers/edac/al/Makefile
new file mode 100644
index 00000000000000..5406d0488963f0
--- /dev/null
+++ b/target/linux/alpine/files/drivers/edac/al/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_EDAC_AL_MC) += al_mc_edac.o
+
+al_mc_edac-objs := al_mc_edac_core.c al_hal_ddr.c
diff --git a/target/linux/alpine/files/drivers/edac/al/al_hal_ddr.c b/target/linux/alpine/files/drivers/edac/al/al_hal_ddr.c
new file mode 100644
index 00000000000000..db2c1b17bfd5bd
--- /dev/null
+++ b/target/linux/alpine/files/drivers/edac/al/al_hal_ddr.c
@@ -0,0 +1,1116 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+	* Redistributions of source code must retain the above copyright notice,
+	  this list of conditions and the following disclaimer.
+
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in
+	  the documentation and/or other materials provided with the
+	  distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*******************************************************************************/ + +/** + * @addtogroup groupddr + * + * @{ + * @file al_hal_ddr.c + * + * @brief DDR controller & PHY HAL driver + * + */ + +#include +#include "al_hal_ddr.h" +#include "al_hal_ddr_ctrl_regs.h" +#include "al_hal_ddr_phy_regs.h" +#include "al_hal_ddr_utils.h" + +/* Wait for PHY BIST to be done */ +static int al_ddr_phy_wait_for_bist_done( + struct al_ddr_phy_regs __iomem *phy_regs); + +/******************************************************************************* + ******************************************************************************/ +int al_ddr_phy_datx_bist( + void __iomem *ddr_ctrl_regs_base, + void __iomem *ddr_phy_regs_base, + struct al_ddr_bist_params *params) +{ + int i; + int err; + + struct al_ddr_phy_regs __iomem *phy_regs = + (struct al_ddr_phy_regs __iomem *)ddr_phy_regs_base; + + uint32_t mode = + (params->mode == AL_DDR_BIST_MODE_LOOPBACK) ? + DWC_DDR_PHY_REGS_BISTRR_BMODE_LOOPBACK : + DWC_DDR_PHY_REGS_BISTRR_BMODE_DRAM; + + uint32_t pattern = + (params->pat == AL_DDR_BIST_PATTERN_WALK_0) ? + DWC_DDR_PHY_REGS_BISTRR_BDPAT_WALKING_0 : + (params->pat == AL_DDR_BIST_PATTERN_WALK_1) ? + DWC_DDR_PHY_REGS_BISTRR_BDPAT_WALKING_1 : + (params->pat == AL_DDR_BIST_PATTERN_LFSR) ? + DWC_DDR_PHY_REGS_BISTRR_BDPAT_LFSR : + DWC_DDR_PHY_REGS_BISTRR_BDPAT_USER; + + al_ddr_phy_vt_calc_disable(phy_regs); + + al_ddr_ctrl_stop(ddr_ctrl_regs_base); + + /** + * Init BIST mode of operation + */ + + /* BISTUDPR */ + _al_reg_write32_masked( + &phy_regs->BISTUDPR, + DWC_DDR_PHY_REGS_BISTUDPR_BUDP0_MASK | + DWC_DDR_PHY_REGS_BISTUDPR_BUDP1_MASK, + DWC_DDR_PHY_REGS_BISTUDPR_BUDP0(params->user_pat_even) | + DWC_DDR_PHY_REGS_BISTUDPR_BUDP1(params->user_pat_odd)); + + /* BISTWCR */ + _al_reg_write32_masked( + &phy_regs->BISTWCR, + DWC_DDR_PHY_REGS_BISTWCR_BWCNT_MASK, + DWC_DDR_PHY_REGS_BISTWCR_BWCNT(params->wc)); + + /* BISTAR0 */ + _al_reg_write32_masked( + &phy_regs->BISTAR[0], + DWC_DDR_PHY_REGS_BISTAR0_BCOL_MASK | + DWC_DDR_PHY_REGS_BISTAR0_BROW_MASK | + DWC_DDR_PHY_REGS_BISTAR0_BBANK_MASK, + DWC_DDR_PHY_REGS_BISTAR0_BCOL(params->col_min) | + DWC_DDR_PHY_REGS_BISTAR0_BROW(params->row_min) | + DWC_DDR_PHY_REGS_BISTAR0_BBANK(params->bank_min)); + + /* BISTAR1 */ + _al_reg_write32_masked( + &phy_regs->BISTAR[1], + DWC_DDR_PHY_REGS_BISTAR1_BRANK_MASK | + DWC_DDR_PHY_REGS_BISTAR1_BMRANK_MASK | + DWC_DDR_PHY_REGS_BISTAR1_BAINC_MASK, + DWC_DDR_PHY_REGS_BISTAR1_BRANK(params->rank_min) | + DWC_DDR_PHY_REGS_BISTAR1_BMRANK(params->rank_max) | + DWC_DDR_PHY_REGS_BISTAR1_BAINC(params->inc)); + + /* BISTAR2 */ + _al_reg_write32_masked( + &phy_regs->BISTAR[2], + DWC_DDR_PHY_REGS_BISTAR2_BMCOL_MASK | + DWC_DDR_PHY_REGS_BISTAR2_BMROW_MASK | + DWC_DDR_PHY_REGS_BISTAR2_BMBANK_MASK, + DWC_DDR_PHY_REGS_BISTAR2_BMCOL(params->col_max) | + DWC_DDR_PHY_REGS_BISTAR2_BMROW(params->row_max) | + DWC_DDR_PHY_REGS_BISTAR2_BMBANK(params->bank_max)); + + /* Run DATX8 BIST */ + for (i = 0; i < AL_DDR_PHY_NUM_BYTE_LANES; i++) { + if (!params->active_byte_lanes[i]) + continue; + + /* Reset status */ + _al_reg_write32_masked( + &phy_regs->BISTRR, + DWC_DDR_PHY_REGS_BISTRR_BINST_MASK, + DWC_DDR_PHY_REGS_BISTRR_BINST_RESET); + + /* Run BIST */ + _al_reg_write32_masked( + &phy_regs->BISTRR, + DWC_DDR_PHY_REGS_BISTRR_BINST_MASK | + DWC_DDR_PHY_REGS_BISTRR_BMODE_MASK | + DWC_DDR_PHY_REGS_BISTRR_BDXEN | + DWC_DDR_PHY_REGS_BISTRR_BACEN | + DWC_DDR_PHY_REGS_BISTRR_BDPAT_MASK | + DWC_DDR_PHY_REGS_BISTRR_BDXSEL_MASK, + DWC_DDR_PHY_REGS_BISTRR_BINST_RUN | + 
mode | + DWC_DDR_PHY_REGS_BISTRR_BDXEN | + pattern | + DWC_DDR_PHY_REGS_BISTRR_BDXSEL(i)); + + al_data_memory_barrier(); + + /* Read BISTGSR for BIST done */ + err = al_ddr_phy_wait_for_bist_done(phy_regs); + if (err) { + al_err( + "%s:%d: al_ddr_phy_wait_for_bist_done failed " + "(byte lane %d)!\n", + __func__, + __LINE__, + i); + return err; + } + } + + /* stop BIST */ + _al_reg_write32_masked( + &phy_regs->BISTRR, + DWC_DDR_PHY_REGS_BISTRR_BINST_MASK, + DWC_DDR_PHY_REGS_BISTRR_BINST_NOP); + + /* PGCR3 - after BIST re-apply power down of unused DQs */ + _al_reg_write32_masked( + &phy_regs->PGCR[3], + DWC_DDR_PHY_REGS_PGCR3_GATEDXCTLCLK | + DWC_DDR_PHY_REGS_PGCR3_GATEDXDDRCLK | + DWC_DDR_PHY_REGS_PGCR3_GATEDXRDCLK, + DWC_DDR_PHY_REGS_PGCR3_GATEDXCTLCLK | + DWC_DDR_PHY_REGS_PGCR3_GATEDXDDRCLK | + DWC_DDR_PHY_REGS_PGCR3_GATEDXRDCLK); + + al_ddr_phy_vt_calc_enable(phy_regs); + + al_ddr_ctrl_resume(ddr_ctrl_regs_base); + + return 0; +} + +/******************************************************************************* + ******************************************************************************/ +int al_ddr_phy_ac_bist( + void __iomem *ddr_phy_regs_base, + enum al_ddr_bist_pat pat) +{ + int err; + + struct al_ddr_phy_regs __iomem *phy_regs = + (struct al_ddr_phy_regs __iomem *)ddr_phy_regs_base; + + uint32_t pattern = + (pat == AL_DDR_BIST_PATTERN_WALK_0) ? + DWC_DDR_PHY_REGS_BISTRR_BDPAT_WALKING_0 : + (pat == AL_DDR_BIST_PATTERN_WALK_1) ? + DWC_DDR_PHY_REGS_BISTRR_BDPAT_WALKING_1 : + DWC_DDR_PHY_REGS_BISTRR_BDPAT_LFSR; + + /* Run AC BIST */ + /* Reset status */ + _al_reg_write32_masked( + &phy_regs->BISTRR, + DWC_DDR_PHY_REGS_BISTRR_BINST_MASK, + DWC_DDR_PHY_REGS_BISTRR_BINST_RESET); + + /* Power up I/O receivers */ + _al_reg_write32_masked( + &phy_regs->ACIOCR[0], + DWC_DDR_PHY_REGS_ACIOCR0_ACPDR | + DWC_DDR_PHY_REGS_ACIOCR0_CKPDR_MASK | + DWC_DDR_PHY_REGS_ACIOCR0_RANKPDR_MASK | + DWC_DDR_PHY_REGS_ACIOCR0_RSTPDR, + 0); + + /* Loopback before buffer in I/O */ + al_reg_write32_masked( + &phy_regs->PGCR[1], + DWC_DDR_PHY_REGS_PGCR1_IOLB, + DWC_DDR_PHY_REGS_PGCR1_IOLB); + + /* Run BIST */ + _al_reg_write32_masked( + &phy_regs->BISTRR, + DWC_DDR_PHY_REGS_BISTRR_BINST_MASK | + DWC_DDR_PHY_REGS_BISTRR_BMODE_MASK | + DWC_DDR_PHY_REGS_BISTRR_BDXEN | + DWC_DDR_PHY_REGS_BISTRR_BACEN | + DWC_DDR_PHY_REGS_BISTRR_BDPAT_MASK, + DWC_DDR_PHY_REGS_BISTRR_BINST_RUN | + DWC_DDR_PHY_REGS_BISTRR_BMODE_LOOPBACK | + DWC_DDR_PHY_REGS_BISTRR_BACEN | + pattern); + + al_data_memory_barrier(); + + /* Read BISTGSR for BIST done */ + err = al_ddr_phy_wait_for_bist_done(phy_regs); + if (err) { + al_err( + "%s:%d: al_ddr_phy_wait_for_bist_done failed!\n", + __func__, + __LINE__); + return err; + } + + /* Power down I/O receivers */ + _al_reg_write32_masked( + &phy_regs->ACIOCR[0], + DWC_DDR_PHY_REGS_ACIOCR0_ACPDR | + DWC_DDR_PHY_REGS_ACIOCR0_CKPDR_MASK | + DWC_DDR_PHY_REGS_ACIOCR0_RANKPDR_MASK | + DWC_DDR_PHY_REGS_ACIOCR0_RSTPDR, + DWC_DDR_PHY_REGS_ACIOCR0_ACPDR | + DWC_DDR_PHY_REGS_ACIOCR0_CKPDR_MASK | + DWC_DDR_PHY_REGS_ACIOCR0_RANKPDR_MASK | + DWC_DDR_PHY_REGS_ACIOCR0_RSTPDR); + + /* stop BIST */ + _al_reg_write32_masked( + &phy_regs->BISTRR, + DWC_DDR_PHY_REGS_BISTRR_BINST_MASK, + DWC_DDR_PHY_REGS_BISTRR_BINST_NOP); + + return 0; +} + +/******************************************************************************* + ******************************************************************************/ +int al_ddr_power_mode_set( + void __iomem *ddr_ctrl_regs_base, + enum al_ddr_power_mode power_mode, + unsigned int 
timer_x32)
+{
+	struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+		&((struct al_ddr_ctrl_regs __iomem *)
+		ddr_ctrl_regs_base)->umctl2_regs;
+
+	uint32_t mode =
+		(power_mode == AL_DDR_POWERMODE_SELF_REFRESH) ?
+		DWC_DDR_UMCTL2_REGS_PWRCTL_SELFREF_EN :
+		(power_mode == AL_DDR_POWERMODE_POWER_DOWN) ?
+		DWC_DDR_UMCTL2_REGS_PWRCTL_POWERDOWN_EN :
+		0;
+
+	/*
+	 * Addressing RMN: 1037
+	 *
+	 * RMN description:
+	 * In the current logic, it is possible for DRAM Read data and/or
+	 * Write data to be active while/after one of the following occurs:
+	 * Power Down Entry (PDE)
+	 * Self Refresh Entry (SRE)
+	 * This would violate the DDR3 memory protocol, which requires that
+	 * "no data bursts are in progress" when the above commands occur.
+	 * Software flow:
+	 * For violations related to PDE, the issue can be avoided by ensuring
+	 * that timer_x32 > 1. For violations related to SRE, ensure that
+	 * AL_DDR_POWERMODE_SELF_REFRESH is set only after all Read data has
+	 * been returned on the application interface and all write data has
+	 * reached the DRAM.
+	 */
+
+	if (timer_x32 <= 1) {
+		al_err(
+			"%s:%d: power mode timer must be greater than 1!\n",
+			__func__,
+			__LINE__);
+		return -EIO;
+	}
+
+	al_reg_write32(&ctrl_regs->pwrtmg,
+		timer_x32 << DWC_DDR_UMCTL2_REGS_PWRTMG_POWERDOWN_TO_X32_SHIFT);
+
+	al_reg_write32(&ctrl_regs->pwrctl, mode);
+
+	return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+enum al_ddr_operating_mode al_ddr_operating_mode_get(
+	void __iomem *ddr_ctrl_regs_base)
+{
+	struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+		&((struct al_ddr_ctrl_regs __iomem *)
+		ddr_ctrl_regs_base)->umctl2_regs;
+	uint32_t reg_val;
+	enum al_ddr_operating_mode operating_mode;
+
+	reg_val = al_reg_read32(&ctrl_regs->stat);
+	reg_val &= DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_MASK;
+
+	operating_mode =
+		(reg_val == DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_INIT) ?
+		AL_DDR_OPERATING_MODE_INIT :
+		(reg_val == DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_NORMAL) ?
+		AL_DDR_OPERATING_MODE_NORMAL :
+		(reg_val == DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_POWER_DOWN) ?
+		AL_DDR_OPERATING_MODE_POWER_DOWN :
+		AL_DDR_OPERATING_MODE_SELF_REFRESH;
+
+	return operating_mode;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+static int al_ddr_phy_wait_for_bist_done(
+	struct al_ddr_phy_regs __iomem *phy_regs)
+{
+	int err;
+	uint32_t reg_val;
+
+	err = al_ddr_reg_poll32(
+		&phy_regs->BISTGSR,
+		DWC_DDR_PHY_REGS_BISTGSR_BDONE,
+		DWC_DDR_PHY_REGS_BISTGSR_BDONE,
+		DEFAULT_TIMEOUT);
+
+	if (err) {
+		al_err("%s: al_ddr_reg_poll32 failed!\n", __func__);
+		return err;
+	}
+
+	reg_val = al_reg_read32(&phy_regs->BISTGSR);
+	/* Make sure there are no BIST errors */
+	if (reg_val &
+		(DWC_DDR_PHY_REGS_BISTGSR_BACERR |
+		DWC_DDR_PHY_REGS_BISTGSR_BDXERR)) {
+		al_err("%s: PHY bist error (BISTGSR = %X)!\n",
+			__func__, reg_val);
+		al_dbg("%s: (BISTWER = %X)!\n",
+			__func__, al_reg_read32(&phy_regs->BISTWER));
+		al_dbg("%s: (BISTBER2 = %X)!\n",
+			__func__, al_reg_read32(&phy_regs->BISTBER[2]));
+		al_dbg("%s: (BISTBER3 = %X)!\n",
+			__func__, al_reg_read32(&phy_regs->BISTBER[3]));
+		al_dbg("%s: (BISTWCSR = %X)!\n",
+			__func__, al_reg_read32(&phy_regs->BISTWCSR));
+		al_dbg("%s: (BISTFWR2 = %X)!\n",
+			__func__, al_reg_read32(&phy_regs->BISTFWR[2]));
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+unsigned int al_ddr_active_ranks_get(
+	void __iomem *ddr_ctrl_regs_base)
+{
+	struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+		&((struct al_ddr_ctrl_regs __iomem *)
+		ddr_ctrl_regs_base)->umctl2_regs;
+	uint32_t mstr_val;
+	unsigned int active_ranks = 0;
+
+	mstr_val = al_reg_read32(&ctrl_regs->mstr);
+	mstr_val &= DWC_DDR_UMCTL2_REGS_MSTR_ACTIVE_RANKS_MASK;
+	mstr_val >>= DWC_DDR_UMCTL2_REGS_MSTR_ACTIVE_RANKS_SHIFT;
+
+	/* each bit in mstr_val corresponds to an available rank */
+	while (mstr_val > 0) {
+		active_ranks += 1;
+		mstr_val >>= 1;
+	}
+
+	return active_ranks;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+int al_ddr_ecc_status_get(
+	void __iomem *ddr_ctrl_regs_base,
+	struct al_ddr_ecc_status *corr_status,
+	struct al_ddr_ecc_status *uncorr_status)
+{
+	uint32_t reg_val;
+
+	struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+		&((struct al_ddr_ctrl_regs __iomem *)
+		ddr_ctrl_regs_base)->umctl2_regs;
+
+	/* Correctable status */
+	if (corr_status) {
+		reg_val = al_reg_read32(&ctrl_regs->eccstat);
+		corr_status->ecc_corrected_bit_num = (reg_val &
+			DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_CORRECTED_BIT_NUM_MASK)
+			>> DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_CORRECTED_BIT_NUM_SHIFT;
+
+		reg_val = al_reg_read32(&ctrl_regs->eccerrcnt);
+		corr_status->err_cnt = (reg_val &
+			DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_CORR_ERR_CNT_MASK)
+			>> DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_CORR_ERR_CNT_SHIFT;
+
+		reg_val = al_reg_read32(&ctrl_regs->ecccaddr0);
+		corr_status->row = (reg_val &
+			DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_ROW_MASK)
+			>> DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_ROW_SHIFT;
+		corr_status->rank = (reg_val &
+			DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_RANK_MASK)
+			>> DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_RANK_SHIFT;
+
+		reg_val = al_reg_read32(&ctrl_regs->ecccaddr1);
+		corr_status->bank = (reg_val &
+			DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_BANK_MASK)
+			>> DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_BANK_SHIFT;
+
corr_status->col = (reg_val & + DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_COL_MASK) + >> DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_COL_SHIFT; + + corr_status->syndromes_31_0 = al_reg_read32(&ctrl_regs->ecccsyn0); + corr_status->syndromes_63_32 = al_reg_read32(&ctrl_regs->ecccsyn1); + reg_val = al_reg_read32(&ctrl_regs->ecccsyn2); + corr_status->syndromes_ecc = (reg_val & + DWC_DDR_UMCTL2_REGS_ECCCSYN2_ECC_CORR_SYNDROMES_71_64_MASK) + >> DWC_DDR_UMCTL2_REGS_ECCCSYN2_ECC_CORR_SYNDROMES_71_64_SHIFT; + + corr_status->corr_bit_mask_31_0 = + al_reg_read32(&ctrl_regs->eccbitmask0); + corr_status->corr_bit_mask_63_32 = + al_reg_read32(&ctrl_regs->eccbitmask1); + reg_val = al_reg_read32(&ctrl_regs->eccbitmask2); + corr_status->corr_bit_mask_ecc = (reg_val & + DWC_DDR_UMCTL2_REGS_ECCBITMASK2_ECC_CORR_BIT_MASK_71_64_MASK) >> + DWC_DDR_UMCTL2_REGS_ECCBITMASK2_ECC_CORR_BIT_MASK_71_64_SHIFT; + } + + /* Uncorrectable status */ + if (uncorr_status) { + reg_val = al_reg_read32(&ctrl_regs->eccerrcnt); + uncorr_status->err_cnt = (reg_val & + DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_UNCORR_ERR_CNT_MASK) + >> DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_UNCORR_ERR_CNT_SHIFT; + + reg_val = al_reg_read32(&ctrl_regs->eccuaddr0); + uncorr_status->row = (reg_val & + DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_ROW_MASK) + >> DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_ROW_SHIFT; + uncorr_status->rank = (reg_val & + DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_RANK_MASK) + >> DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_RANK_SHIFT; + + reg_val = al_reg_read32(&ctrl_regs->eccuaddr1); + uncorr_status->bank = (reg_val & + DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_BANK_MASK) + >> DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_BANK_SHIFT; + uncorr_status->col = (reg_val & + DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_COL_MASK) + >> DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_COL_SHIFT; + + uncorr_status->syndromes_31_0 = al_reg_read32(&ctrl_regs->eccusyn0); + uncorr_status->syndromes_63_32 = al_reg_read32(&ctrl_regs->eccusyn1); + reg_val = al_reg_read32(&ctrl_regs->eccusyn2); + uncorr_status->syndromes_ecc = (reg_val & + DWC_DDR_UMCTL2_REGS_ECCUSYN2_ECC_UNCORR_SYNDROMES_71_64_MASK) >> + DWC_DDR_UMCTL2_REGS_ECCUSYN2_ECC_UNCORR_SYNDROMES_71_64_SHIFT; + } + + return 0; +} + +/******************************************************************************* + ******************************************************************************/ +void al_ddr_ecc_cfg_get( + void __iomem *ddr_ctrl_regs_base, + struct al_ddr_ecc_cfg *cfg) +{ + struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs = + &((struct al_ddr_ctrl_regs __iomem *) + ddr_ctrl_regs_base)->umctl2_regs; + uint32_t cfg_val; + + cfg_val = al_reg_read32(&ctrl_regs->ecccfg0); + + cfg->ecc_enabled = + ((cfg_val & DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_MASK) == + DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_DIS) ? AL_FALSE : AL_TRUE; + + /* dis_scrub is valid only when ecc mode is enabled */ + if (cfg->ecc_enabled) + cfg->scrub_enabled = + (cfg_val & DWC_DDR_UMCTL2_REGS_ECCCFG0_DIS_SCRUB) ? 
+ AL_FALSE : AL_TRUE; +} + +/******************************************************************************* + ******************************************************************************/ +int al_ddr_ecc_corr_count_clear( + void __iomem *ddr_ctrl_regs_base) +{ + struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs = + &((struct al_ddr_ctrl_regs __iomem *) + ddr_ctrl_regs_base)->umctl2_regs; + + al_reg_write32(&ctrl_regs->eccclr, + DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_CORR_ERR_CNT); + + return 0; +} + +/******************************************************************************* + ******************************************************************************/ +int al_ddr_ecc_corr_int_clear( + void __iomem *nb_regs_base, + void __iomem *ddr_ctrl_regs_base) +{ + struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs = + &((struct al_ddr_ctrl_regs __iomem *) + ddr_ctrl_regs_base)->umctl2_regs; + + al_reg_write32(&ctrl_regs->eccclr, + DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_CORR_ERR); + + if (nb_regs_base) { + struct al_nb_regs __iomem *nb_regs; + + al_data_memory_barrier(); + + nb_regs = (struct al_nb_regs __iomem *)nb_regs_base; + al_reg_write32(&nb_regs->global.nb_int_cause, + ~NB_GLOBAL_NB_INT_CAUSE_MCTL_ECC_CORR_ERR); + } + + return 0; +} + +/******************************************************************************* + ******************************************************************************/ +int al_ddr_ecc_uncorr_count_clear( + void __iomem *ddr_ctrl_regs_base) +{ + struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs = + &((struct al_ddr_ctrl_regs __iomem *) + ddr_ctrl_regs_base)->umctl2_regs; + + al_reg_write32(&ctrl_regs->eccclr, + DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_UNCORR_ERR_CNT); + + return 0; +} + +/******************************************************************************* + ******************************************************************************/ +int al_ddr_ecc_uncorr_int_clear( + void __iomem *nb_regs_base, + void __iomem *ddr_ctrl_regs_base) +{ + struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs = + &((struct al_ddr_ctrl_regs __iomem *) + ddr_ctrl_regs_base)->umctl2_regs; + + al_reg_write32(&ctrl_regs->eccclr, + DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_UNCORR_ERR); + + if (nb_regs_base) { + struct al_nb_regs __iomem *nb_regs; + + al_data_memory_barrier(); + + nb_regs = (struct al_nb_regs __iomem *)nb_regs_base; + al_reg_write32(&nb_regs->global.nb_int_cause, + ~NB_GLOBAL_NB_INT_CAUSE_MCTL_ECC_UNCORR_ERR); + } + + return 0; +} + +/******************************************************************************* + ******************************************************************************/ +int al_ddr_ecc_data_poison_enable( + void __iomem *ddr_ctrl_regs_base, + unsigned int rank, + unsigned int bank, + unsigned int col, + unsigned int row) +{ + struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs = + &((struct al_ddr_ctrl_regs __iomem *) + ddr_ctrl_regs_base)->umctl2_regs; + + /* Set data poison address */ + al_reg_write32(&ctrl_regs->eccpoisonaddr0, + (col << + DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_ECC_POISON_COL_SHIFT) | + (rank << + DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_ECC_POISON_RANK_SHIFT)); + al_reg_write32(&ctrl_regs->eccpoisonaddr1, + (row << + DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_ECC_POISON_ROW_SHIFT) | + (bank << + DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_ECC_POISON_BANK_SHIFT)); + + /* Enable data poisoning */ + al_reg_write32(&ctrl_regs->ecccfg1, + DWC_DDR_UMCTL2_REGS_ECCCFG1_DATA_POISON); + + return 0; +} + 
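+/*
+ * Illustrative sketch only (not part of the original driver): a typical ECC
+ * error-injection flow built on the data poison API above. The poisoned
+ * location (rank/bank/col/row 0) is a placeholder; a real test would also
+ * write and read back the poisoned address between enable and status
+ * readout. Kept under #if 0 since it is an example, not driver code.
+ */
+#if 0
+static int al_ddr_ecc_poison_selftest(void __iomem *ddr_ctrl_regs_base)
+{
+	struct al_ddr_ecc_status corr_status;
+	int err;
+
+	/* Poison a placeholder location: rank 0, bank 0, col 0, row 0 */
+	err = al_ddr_ecc_data_poison_enable(ddr_ctrl_regs_base, 0, 0, 0, 0);
+	if (err)
+		return err;
+
+	/* ... access the poisoned address here to trigger the error ... */
+
+	/* Read back the correctable-error status recorded by the controller */
+	al_ddr_ecc_status_get(ddr_ctrl_regs_base, &corr_status, NULL);
+
+	return al_ddr_ecc_data_poison_disable(ddr_ctrl_regs_base);
+}
+#endif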
+/******************************************************************************* + ******************************************************************************/ +int al_ddr_ecc_data_poison_disable( + void __iomem *ddr_ctrl_regs_base) +{ + struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs = + &((struct al_ddr_ctrl_regs __iomem *) + ddr_ctrl_regs_base)->umctl2_regs; + + /* Disable data poisoning */ + al_reg_write32(&ctrl_regs->ecccfg1, 0); + + return 0; +} + +/******************************************************************************* + ******************************************************************************/ +unsigned int al_ddr_parity_count_get( + void __iomem *ddr_ctrl_regs_base) +{ + uint32_t reg_val; + unsigned int parity_count; + + struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs = + &((struct al_ddr_ctrl_regs __iomem *) + ddr_ctrl_regs_base)->umctl2_regs; + + reg_val = al_reg_read32(&ctrl_regs->parstat); + parity_count = (reg_val & + DWC_DDR_UMCTL2_REGS_PARSTAT_DFI_PARITY_ERR_CNT_MASK) + >> DWC_DDR_UMCTL2_REGS_PARSTAT_DFI_PARITY_ERR_CNT_SHIFT; + + return parity_count; +} + +/******************************************************************************* + ******************************************************************************/ +void al_ddr_parity_count_clear( + void __iomem *ddr_ctrl_regs_base) +{ + struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs = + &((struct al_ddr_ctrl_regs __iomem *) + ddr_ctrl_regs_base)->umctl2_regs; + + al_reg_write32_masked(&ctrl_regs->parctl, + DWC_DDR_UMCTL2_REGS_PARCTL_DFI_PARITY_ERR_CNT_CLR, + DWC_DDR_UMCTL2_REGS_PARCTL_DFI_PARITY_ERR_CNT_CLR); +} + +/******************************************************************************* + ******************************************************************************/ +void al_ddr_parity_int_clear( + void __iomem *nb_regs_base, + void __iomem *ddr_ctrl_regs_base) +{ + struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs = + &((struct al_ddr_ctrl_regs __iomem *) + ddr_ctrl_regs_base)->umctl2_regs; + + struct al_nb_regs __iomem *nb_regs = + (struct al_nb_regs __iomem *)nb_regs_base; + + al_reg_write32_masked(&ctrl_regs->parctl, + DWC_DDR_UMCTL2_REGS_PARCTL_DFI_PARITY_ERR_INT_CLR, + DWC_DDR_UMCTL2_REGS_PARCTL_DFI_PARITY_ERR_INT_CLR); + + al_data_memory_barrier(); + + al_reg_write32(&nb_regs->global.nb_int_cause, + ~NB_GLOBAL_NB_INT_CAUSE_MCTL_PARITY_ERR); +} + +/******************************************************************************* + ******************************************************************************/ +static int al_ddr_address_map_get( + void __iomem *ddr_ctrl_regs_base, + struct al_ddr_addrmap *addrmap) +{ + struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs = + &((struct al_ddr_ctrl_regs __iomem *) + ddr_ctrl_regs_base)->umctl2_regs; + + uint32_t reg_val; + int addrmap_col_b2_11[10]; + int addrmap_bank_b0_2[3]; + int addrmap_row_b0_2_10[3]; + int addrmap_row_b11_15[5]; + int addrmap_cs_b0_1[2]; + + unsigned int i; + + enum al_ddr_data_width data_width = + al_ddr_data_width_get(ddr_ctrl_regs_base); + + /** + * CS address mapping + */ + reg_val = al_reg_read32(&ctrl_regs->addrmap0); + addrmap_cs_b0_1[1] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT1_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT1_SHIFT; + addrmap_cs_b0_1[0] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT0_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT0_SHIFT; + + /** + * Bank address mapping + */ + reg_val = al_reg_read32(&ctrl_regs->addrmap1); + addrmap_bank_b0_2[2] = 
(reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B2_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B2_SHIFT; + addrmap_bank_b0_2[1] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B1_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B1_SHIFT; + addrmap_bank_b0_2[0] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B0_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B0_SHIFT; + + /** + * Column address mapping + */ + reg_val = al_reg_read32(&ctrl_regs->addrmap2); + addrmap_col_b2_11[3] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B5_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B5_SHIFT; + addrmap_col_b2_11[2] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B4_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B4_SHIFT; + addrmap_col_b2_11[1] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B3_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B3_SHIFT; + addrmap_col_b2_11[0] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B2_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B2_SHIFT; + + reg_val = al_reg_read32(&ctrl_regs->addrmap3); + addrmap_col_b2_11[7] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B9_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B9_SHIFT; + addrmap_col_b2_11[6] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B8_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B8_SHIFT; + addrmap_col_b2_11[5] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B7_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B7_SHIFT; + addrmap_col_b2_11[4] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B6_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B6_SHIFT; + + reg_val = al_reg_read32(&ctrl_regs->addrmap4); + addrmap_col_b2_11[9] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B11_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B11_SHIFT; + addrmap_col_b2_11[8] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B10_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B10_SHIFT; + + /** + * Row address mapping + */ + reg_val = al_reg_read32(&ctrl_regs->addrmap5); + addrmap_row_b11_15[0] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B11_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B11_SHIFT; + addrmap_row_b0_2_10[2] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B2_10_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B2_10_SHIFT; + addrmap_row_b0_2_10[1] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B1_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B1_SHIFT; + addrmap_row_b0_2_10[0] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B0_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B0_SHIFT; + + reg_val = al_reg_read32(&ctrl_regs->addrmap6); + addrmap_row_b11_15[4] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B15_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B15_SHIFT; + addrmap_row_b11_15[3] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B14_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B14_SHIFT; + addrmap_row_b11_15[2] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B13_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B13_SHIFT; + addrmap_row_b11_15[1] = (reg_val & + DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B12_MASK) + >> DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B12_SHIFT; + + /* Column */ + for (i = 0; i < (AL_ARR_SIZE(addrmap->col_b3_9_b11_13) - 1); i++) { + int user_val; + + user_val = addrmap_col_b2_11[i]; + + if (data_width == AL_DDR_DATA_WIDTH_64_BITS) + 
addrmap->col_b3_9_b11_13[i] = + (user_val == AL_DDR_ADDR_MAP_COL_DISABLED) ? + AL_DDR_ADDRMAP_NC : + (user_val + AL_DDR_ADDR_MAP_OFFSET + + AL_DDR_ADDR_MAP_COL_2_BASE + i); + else + addrmap->col_b3_9_b11_13[i + 1] = + (user_val == AL_DDR_ADDR_MAP_COL_DISABLED) ? + AL_DDR_ADDRMAP_NC : + (user_val + AL_DDR_ADDR_MAP_OFFSET + + AL_DDR_ADDR_MAP_COL_2_BASE + i); + } + + if (data_width == AL_DDR_DATA_WIDTH_64_BITS) + addrmap->col_b3_9_b11_13[i] = AL_DDR_ADDRMAP_NC; + if (data_width == AL_DDR_DATA_WIDTH_32_BITS) + addrmap->col_b3_9_b11_13[0] = 5; + + /* Bank */ + for (i = 0; i < AL_ARR_SIZE(addrmap->bank_b0_2); i++) { + int user_val = addrmap_bank_b0_2[i]; + + addrmap->bank_b0_2[i] = + (user_val == AL_DDR_ADDR_MAP_BANK_DISABLED) ? + AL_DDR_ADDRMAP_NC : + (user_val + AL_DDR_ADDR_MAP_OFFSET + + AL_DDR_ADDR_MAP_BANK_0_BASE + i); + } + + /* CS */ + for (i = 0; i < AL_ARR_SIZE(addrmap->cs_b0_1); i++) { + int user_val = addrmap_cs_b0_1[i]; + + addrmap->cs_b0_1[i] = + (user_val == AL_DDR_ADDR_MAP_CS_DISABLED) ? + AL_DDR_ADDRMAP_NC : + (user_val + AL_DDR_ADDR_MAP_OFFSET + + AL_DDR_ADDR_MAP_CS_0_BASE + i); + } + + /* Row */ + for (i = 0; i < AL_ARR_SIZE(addrmap->row_b0_2_10); i++) { + int user_val = addrmap_row_b0_2_10[i]; + + addrmap->row_b0_2_10[i] = + (user_val == AL_DDR_ADDR_MAP_ROW_DISABLED) ? + AL_DDR_ADDRMAP_NC : + (user_val + AL_DDR_ADDR_MAP_OFFSET + + AL_DDR_ADDR_MAP_ROW_0_BASE + i); + } + + for (i = 0; i < AL_ARR_SIZE(addrmap->row_b11_15); i++) { + int user_val = addrmap_row_b11_15[i]; + + addrmap->row_b11_15[i] = + (user_val == AL_DDR_ADDR_MAP_ROW_DISABLED) ? + AL_DDR_ADDRMAP_NC : + (user_val + AL_DDR_ADDR_MAP_OFFSET + + AL_DDR_ADDR_MAP_ROW_11_BASE + i); + } + + return 0; +} + +/******************************************************************************* + ******************************************************************************/ +enum al_ddr_data_width al_ddr_data_width_get( + void __iomem *ddr_ctrl_regs_base) +{ + struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs = + &((struct al_ddr_ctrl_regs __iomem *) + ddr_ctrl_regs_base)->umctl2_regs; + uint32_t reg_val; + enum al_ddr_data_width data_width; + + reg_val = al_reg_read32(&ctrl_regs->mstr); + reg_val &= DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_MASK; + + data_width = + (reg_val == DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_64) ? 
+ AL_DDR_DATA_WIDTH_64_BITS : + AL_DDR_DATA_WIDTH_32_BITS; + + return data_width; +} + +/******************************************************************************* + ******************************************************************************/ +int al_ddr_address_translate_sys2dram( + void __iomem *ddr_ctrl_regs_base, + al_phys_addr_t sys_address, + unsigned int *rank, + unsigned int *bank, + unsigned int *col, + unsigned int *row) +{ + + int i; + unsigned int temp_rank = 0; + unsigned int temp_bank = 0; + unsigned int temp_col = 0; + unsigned int temp_row = 0; + struct al_ddr_addrmap addrmap; + + enum al_ddr_data_width data_width = + al_ddr_data_width_get(ddr_ctrl_regs_base); + + al_ddr_address_map_get(ddr_ctrl_regs_base,&addrmap); + + if (data_width == AL_DDR_DATA_WIDTH_64_BITS) + temp_col += ((sys_address >> 3) & 0x7); + else + temp_col += ((sys_address >> 2) & 0x7); + + for (i = 0; i < 7; i++) + if (addrmap.col_b3_9_b11_13[i] != AL_DDR_ADDRMAP_NC){ + temp_col += (((sys_address >> addrmap.col_b3_9_b11_13[i]) & 0x1) << (i + 3)); + } + if (addrmap.col_b3_9_b11_13[7] != AL_DDR_ADDRMAP_NC) + temp_col += (((sys_address >> addrmap.col_b3_9_b11_13[7]) & 0x1) << 11); + if (addrmap.col_b3_9_b11_13[8] != AL_DDR_ADDRMAP_NC) + temp_col += (((sys_address >> addrmap.col_b3_9_b11_13[8]) & 0x1) << 13); + + for (i = 0; i < 3; i++) + if (addrmap.bank_b0_2[i] != AL_DDR_ADDRMAP_NC){ + temp_bank += (((sys_address >> addrmap.bank_b0_2[i]) & 0x1) << i); + } + + for (i = 0; i < 2; i++) + if (addrmap.row_b0_2_10[i] != AL_DDR_ADDRMAP_NC){ + temp_row += (((sys_address >> addrmap.row_b0_2_10[i]) & 0x1) << i); + } + + if (addrmap.row_b0_2_10[2] != AL_DDR_ADDRMAP_NC) + for (i = 0; i < 9; i++){ + temp_row += (((sys_address >> (addrmap.row_b0_2_10[2] + i)) & 0x1) << (i + 2)); + } + + for (i = 0; i < 5; i++) + if (addrmap.row_b11_15[i] != AL_DDR_ADDRMAP_NC){ + temp_row += (((sys_address >> addrmap.row_b11_15[i]) & 0x1) << (i + 11)); + } + + for (i = 0; i < 2; i++) + if (addrmap.cs_b0_1[i] != AL_DDR_ADDRMAP_NC){ + temp_rank += (((sys_address >> addrmap.cs_b0_1[i]) & 0x1) << i); + } + + *rank = temp_rank; + *bank = temp_bank; + *col = temp_col; + *row = temp_row; + + return 0; +} + +/******************************************************************************* + ******************************************************************************/ +int al_ddr_address_translate_dram2sys( + void __iomem *ddr_ctrl_regs_base, + al_phys_addr_t *sys_address, + unsigned int rank, + unsigned int bank, + unsigned int col, + unsigned int row) +{ + int i; + struct al_ddr_addrmap addrmap; + al_phys_addr_t address = 0; + + enum al_ddr_data_width data_width = + al_ddr_data_width_get(ddr_ctrl_regs_base); + al_ddr_address_map_get(ddr_ctrl_regs_base,&addrmap); + + if (data_width == AL_DDR_DATA_WIDTH_64_BITS) + address += ((col & 0x7) << 3); + else + address += ((col & 0x7) << 2); + + for (i = 0; i < 7; i++) + if (addrmap.col_b3_9_b11_13[i] != AL_DDR_ADDRMAP_NC){ + address += ((((al_phys_addr_t)col >> (i + 3)) & 0x1) << addrmap.col_b3_9_b11_13[i]); + } + if (addrmap.col_b3_9_b11_13[7] != AL_DDR_ADDRMAP_NC) + address += ((((al_phys_addr_t)col >> 11) & 0x1) << addrmap.col_b3_9_b11_13[7]); + if (addrmap.col_b3_9_b11_13[8] != AL_DDR_ADDRMAP_NC) + address += ((((al_phys_addr_t)col >> 13) & 0x1) << addrmap.col_b3_9_b11_13[8]); + + for (i = 0; i < 3; i++) + if (addrmap.bank_b0_2[i] != AL_DDR_ADDRMAP_NC){ + address += ((((al_phys_addr_t)bank >> (i)) & 0x1) << addrmap.bank_b0_2[i]); + } + + for (i = 0; i < 2; i++) + if (addrmap.row_b0_2_10[i] != 
AL_DDR_ADDRMAP_NC){ + address += ((((al_phys_addr_t)row >> (i)) & 0x1) << addrmap.row_b0_2_10[i]); + } + + if (addrmap.row_b0_2_10[2] != AL_DDR_ADDRMAP_NC) + for (i = 0; i < 9; i++){ + address += ((((al_phys_addr_t)row >> (i + 2)) & 0x1) << (addrmap.row_b0_2_10[2] + i)); + } + + + for (i = 0; i < 5; i++) + if (addrmap.row_b11_15[i] != AL_DDR_ADDRMAP_NC){ + address += ((((al_phys_addr_t)row >> (i + 11)) & 0x1) << addrmap.row_b11_15[i]); + } + + for (i = 0; i < 2; i++) + if (addrmap.cs_b0_1[i] != AL_DDR_ADDRMAP_NC){ + address += ((((al_phys_addr_t)rank >> (i)) & 0x1) << addrmap.cs_b0_1[i]); + } + + *sys_address = address; + + return 0; +} + +/******************************************************************************* + ******************************************************************************/ +unsigned int al_ddr_bits_per_rank_get( + void __iomem *ddr_ctrl_regs_base) +{ + int i, active_bits = 0; + struct al_ddr_addrmap addrmap; + enum al_ddr_data_width data_width = + al_ddr_data_width_get(ddr_ctrl_regs_base); + + al_ddr_address_map_get(ddr_ctrl_regs_base,&addrmap); + + /* 64bit systems have a 6bit offset, 32bit systems have a 5bit offset */ + if (data_width == AL_DDR_DATA_WIDTH_64_BITS) + active_bits += 6; + else + active_bits += 5; + + /* iterate over addrmap, count the amount of connected bits */ + for (i = 0; i < 9; i++) + if (addrmap.col_b3_9_b11_13[i] != AL_DDR_ADDRMAP_NC) + active_bits++; + + for (i = 0; i < 3; i++) + if (addrmap.bank_b0_2[i] != AL_DDR_ADDRMAP_NC) + active_bits++; + + for (i = 0; i < 2; i++) + if (addrmap.row_b0_2_10[i] != AL_DDR_ADDRMAP_NC) + active_bits++; + + if (addrmap.row_b0_2_10[2] != AL_DDR_ADDRMAP_NC) + active_bits += 9; + + for (i = 0; i < 5; i++) + if (addrmap.row_b11_15[i] != AL_DDR_ADDRMAP_NC) + active_bits++; + + return active_bits; +} + +/** @} end of DDR group */ + diff --git a/target/linux/alpine/files/drivers/edac/al/al_hal_ddr.h b/target/linux/alpine/files/drivers/edac/al/al_hal_ddr.h new file mode 100644 index 00000000000000..e43627b82bf399 --- /dev/null +++ b/target/linux/alpine/files/drivers/edac/al/al_hal_ddr.h @@ -0,0 +1,505 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
+ IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @defgroup groupddr DDR controller & PHY hardware abstraction layer + * @{ + * @file al_hal_ddr.h + * + * @brief Header file for the DDR HAL driver + */ + +#ifndef __AL_HAL_DDR_H__ +#define __AL_HAL_DDR_H__ + +#include "al_hal_common.h" +#include "al_hal_ddr_cfg.h" + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +/** + * DDR address mapping - not connected bit + * See explanation about al_ddr_addrmap below. + */ +#define AL_DDR_ADDRMAP_NC 0xff + +/* Data Width */ +enum al_ddr_data_width { + AL_DDR_DATA_WIDTH_32_BITS, + AL_DDR_DATA_WIDTH_64_BITS, +}; + +/** + * Address mapping: + * Read and write requests are provided to the DDR controller with a system + * address. The system address is the command address of a transaction as + * presented on one of the data ports. The DDR controller is responsible for + * mapping this system address to rank, bank, row, and column addresses to the + * SDRAM. It converts the system address to a physical address. + * For each CS/bank/column/row bit assign a system memory address bit index. + * Set to AL_DDR_ADDRMAP_NC if not connected. + * CS minimal supported memory address bit index is 10. + * Bank minimal supported memory address bit index is 6. + * Column minimal supported memory address bit index is 4. + * Row minimal supported memory address bit index is 10. + * + * Address mapping might affect the system performance and should be optimized + * according to the specific application nature. The basic guideline is keeping + * as many pages open as possible and avoiding frequent closing of pages and + * opening new ones. + * + * Example: + * Mapping of 16GB memory device with 64 bits data width, 1KB page + * + * System address bit index | SDRAM required mapping + * ---------------------------------------------------- + * 33:32 cs[1:0] + * 31:16 row[15:0] + * 15:13 bank[2:0] + * 12:3 col[9:0] + * 2:0 N/A since 8 bytes are accessed at a time + * + * In this case the following setting is required: + * col_b3_9_b11_13 = { 6, 7, 8, 9, 10, 11, 12, AL_DDR_ADDRMAP_NC, ... } + * bank_b0_2 = { 13, 14, 15 } + * row_b0_2_10 = { 16, 17, 18 } + * row_b11_15 = { 27, 28, 29, 30, 31 } + * cs_b0_1 = { 32, 33 } + */ +struct al_ddr_addrmap { + /** + * Column bits 3 - 9, 11 - 13 + * Bit 3 relevant only for 64 bits data bus + * Bit 13 relevant only for 32 bits data bus + */ + uint8_t col_b3_9_b11_13[10]; + + /* Bank bits 0 - 2 */ + uint8_t bank_b0_2[3]; + + /** + * Row bits 0 - 2 + * Bits 3 - 10 follow bit 2 + */ + uint8_t row_b0_2_10[3]; + + /* Row bits 11 - 15 */ + uint8_t row_b11_15[5]; + + /* CS bits 0 - 1 */ + uint8_t cs_b0_1[2]; +}; + + +/* Data BIST mode */ +enum al_ddr_bist_mode { + /** + * Loopback mode: + * Address, commands and data loop back at the PHY I/Os + */ + AL_DDR_BIST_MODE_LOOPBACK, + + /** + * DRAM mode: + * Address, commands and data go to DRAM for normal memory accesses. 
+ */ + AL_DDR_BIST_MODE_DRAM, +}; + +/* Data/AC BIST pattern */ +enum al_ddr_bist_pat { + /* Walking '0' */ + AL_DDR_BIST_PATTERN_WALK_0, + + /* Walking '1' */ + AL_DDR_BIST_PATTERN_WALK_1, + + /* LFSR-based pseudo-random */ + AL_DDR_BIST_PATTERN_LFSR, + + /* User programmable (Not valid for AC loopback) */ + AL_DDR_BIST_PATTERN_USER, +}; + +/* Data BIST parameters */ +struct al_ddr_bist_params { + /* Mode */ + enum al_ddr_bist_mode mode; + + /* Pattern */ + enum al_ddr_bist_pat pat; + + /** + * User Data Pattern 0: + * Data to be applied on even DQ pins during BIST. + * Valid values: 0x0000 - 0xffff + */ + unsigned int user_pat_even; + + /** + * User Data Pattern 1: + * Data to be applied on odd DQ pins during BIST. + * Valid values: 0x0000 - 0xffff + */ + unsigned int user_pat_odd; + + /** Word count + * Indicates the number of words to generate during BIST. + * Valid values are 4, 8, 12, 16, and so on. + * Maximal value: 0xfffc + */ + unsigned int wc; + + /** Address increment + * Selects the value by which the SDRAM address is incremented for each + * write/read access. This value must be at the beginning of a burst + * boundary, i.e. the lower bits must be "000". + * Maximal value: 0xff8 + */ + unsigned int inc; + + /** + * BIST Column Address: + * Selects the SDRAM column address to be used during BIST. The lower + * bits of this address must be "000". + */ + unsigned int col_min; + + /** + * BIST Maximum Column Address: + * Specifies the maximum SDRAM column address to be used during BIST + * before the address increments to the next row. + */ + unsigned int col_max; + + /** + * BIST Row Address: + * Selects the SDRAM row address to be used during BIST. + */ + unsigned int row_min; + + /** + * BIST Maximum Row Address: + * Specifies the maximum SDRAM row address to be used during BIST + * before the address increments to the next bank. + */ + unsigned int row_max; + + /** + * BIST Bank Address: + * Selects the SDRAM bank address to be used during BIST. + */ + unsigned int bank_min; + + /** + * BIST Maximum Bank Address: + * Specifies the maximum SDRAM bank address to be used during BIST + * before the address increments to the next rank. + */ + unsigned int bank_max; + + /** + * BIST Rank: + * Selects the SDRAM rank to be used during BIST. + */ + unsigned int rank_min; + + /** + * BIST Maximum Rank: + * Specifies the maximum SDRAM rank to be used during BIST. + */ + unsigned int rank_max; + + /** + * Active byte lanes on which BIST is applied. + * BIST can always be applied on lanes 0-3. + * Lane 4 - only if ECC is supported by the DDR device. + * Lanes 5-8 - only for 64 bits data bus width. + */ + uint8_t active_byte_lanes[AL_DDR_PHY_NUM_BYTE_LANES]; +}; + +/* ECC status parameters */ +struct al_ddr_ecc_status { + /* Number of ECC errors detected */ + unsigned int err_cnt; + + /* Rank number of a read resulting in an ECC error */ + unsigned int rank; + + /* Bank number of a read resulting in an ECC error */ + unsigned int bank; + + /* Row number of a read resulting in an ECC error */ + unsigned int row; + + /* Column number of a read resulting in an ECC error */ + unsigned int col; + + /* Data pattern that resulted in a corrected error */ + uint32_t syndromes_31_0; + uint32_t syndromes_63_32; /* For 32-bit ECC - not used. 
*/ + uint32_t syndromes_ecc; /* ECC lane */ + + /** + * Mask for the corrected data portion + * 1 on any bit indicates that the bit has been corrected by the ECC + * logic + * 0 on any bit indicates that the bit has not been corrected by the + * ECC logic + * This register accumulates data over multiple ECC errors, to give an + * overall indication of which bits are being fixed. It is cleared by + * calling al_ddr_ecc_corr_int_clear. + */ + uint32_t corr_bit_mask_31_0; + uint32_t corr_bit_mask_63_32; /* For 32-bit ECC - not used. */ + uint32_t corr_bit_mask_ecc; /* ECC lane */ + + /* Bit number corrected by single-bit ECC error */ + unsigned int ecc_corrected_bit_num; +}; + +struct al_ddr_ecc_cfg { + /* ECC mode indicator */ + al_bool ecc_enabled; + + /* Enable ECC scrubs - applicable only when ecc is enabled */ + al_bool scrub_enabled; +}; + +/* DDR controller power modes */ +enum al_ddr_power_mode { + /* No power mode enabled */ + AL_DDR_POWERMODE_OFF, + + /** + * Self refresh: + * Puts the SDRAM into self refresh when there are no active transactions + */ + AL_DDR_POWERMODE_SELF_REFRESH, + + /** + * Power down: + * The DDR controller goes into power-down after a + * programmable number of idle cycles (Multiples of 32 clocks) + */ + AL_DDR_POWERMODE_POWER_DOWN, + +}; + +/* DDR operating modes */ +enum al_ddr_operating_mode { + /* Initialization */ + AL_DDR_OPERATING_MODE_INIT, + + /* Normal operation */ + AL_DDR_OPERATING_MODE_NORMAL, + + /* Power down */ + AL_DDR_OPERATING_MODE_POWER_DOWN, + + /* Self refresh */ + AL_DDR_OPERATING_MODE_SELF_REFRESH, + +}; + +int al_ddr_phy_datx_bist( + void __iomem *ddr_ctrl_regs_base, + void __iomem *ddr_phy_regs_base, + struct al_ddr_bist_params *params); + +int al_ddr_phy_ac_bist( + void __iomem *ddr_phy_regs_base, + enum al_ddr_bist_pat pat); + +/** + * @brief Get current data bus width + * + * @param ddr_ctrl_regs_base + * Address of the DDR controller register base + * + * @returns The data bus width + */ +enum al_ddr_data_width al_ddr_data_width_get( + void __iomem *ddr_ctrl_regs_base); + +/** + * @brief Get the current number of available ranks + * + * @param ddr_ctrl_regs_base + * Address of the DDR controller register base + * + * @returns The number of available ranks + */ +unsigned int al_ddr_active_ranks_get( + void __iomem *ddr_ctrl_regs_base); + +/** + * @brief Get the current corrected/uncorrected error status + * + * @param ddr_ctrl_regs_base + * Address of the DDR controller register base + * @param corr_status + * The corrected error status (use NULL if no status is required) + * @param uncorr_status + * The uncorrected error status (use NULL if no status is required) + * + * @returns 0 if successful + * <0 otherwise + */ +int al_ddr_ecc_status_get( + void __iomem *ddr_ctrl_regs_base, + struct al_ddr_ecc_status *corr_status, + struct al_ddr_ecc_status *uncorr_status); + +/** + * @brief Get the current ECC configuration + * + * @param ddr_ctrl_regs_base + * Address of the DDR controller register base + * @param cfg + * The ECC configuration + */ +void al_ddr_ecc_cfg_get( + void __iomem *ddr_ctrl_regs_base, + struct al_ddr_ecc_cfg *cfg); + +int al_ddr_ecc_corr_count_clear( + void __iomem *ddr_ctrl_regs_base); + +/** + * @brief Clear the correctable error interrupt + * + * @param nb_regs_base + * Address of the NB register base, used in order 
to clear NB interrupt + * (use NULL if no clearing is required) + * @param ddr_ctrl_regs_base + * Address of the DDR controller register base + * + * @returns 0 if successful + * <0 otherwise + */ +int al_ddr_ecc_corr_int_clear( + void __iomem *nb_regs_base, + void __iomem *ddr_ctrl_regs_base); + +int al_ddr_ecc_uncorr_count_clear( + void __iomem *ddr_ctrl_regs_base); + +/** + * @brief Clear the uncorrectable error interrupt + * + * @param nb_regs_base + * Address of the NB register base, used in order to clear NB interrupt + * (use NULL if no clearing is required) + * @param ddr_ctrl_regs_base + * Address of the DDR controller register base + * + * @returns 0 if successful + * <0 otherwise + */ +int al_ddr_ecc_uncorr_int_clear( + void __iomem *nb_regs_base, + void __iomem *ddr_ctrl_regs_base); + +int al_ddr_ecc_data_poison_enable( + void __iomem *ddr_ctrl_regs_base, + unsigned int rank, + unsigned int bank, + unsigned int col, + unsigned int row); + +int al_ddr_ecc_data_poison_disable( + void __iomem *ddr_ctrl_regs_base); + +unsigned int al_ddr_parity_count_get( + void __iomem *ddr_ctrl_regs_base); + +void al_ddr_parity_count_clear( + void __iomem *ddr_ctrl_regs_base); + +void al_ddr_parity_int_clear( + void __iomem *nb_regs_base, + void __iomem *ddr_ctrl_regs_base); + +int al_ddr_power_mode_set( + void __iomem *ddr_ctrl_regs_base, + enum al_ddr_power_mode power_mode, + unsigned int timer_x32); + +enum al_ddr_operating_mode al_ddr_operating_mode_get( + void __iomem *ddr_ctrl_regs_base); + +int al_ddr_address_translate_sys2dram( + void __iomem *ddr_ctrl_regs_base, + al_phys_addr_t sys_address, + unsigned int *rank, + unsigned int *bank, + unsigned int *col, + unsigned int *row); + +int al_ddr_address_translate_dram2sys( + void __iomem *ddr_ctrl_regs_base, + al_phys_addr_t *sys_address, + unsigned int rank, + unsigned int bank, + unsigned int col, + unsigned int row); + +/** + * @brief Get the number of connected address bits + * + * These bits can be used in order to calculate the memory device's rank size + * + * @param ddr_ctrl_regs_base + * Address of the DDR controller register base + * + * @returns Number of connected address bits (rank size == 1 << active_bits) + */ +unsigned int al_ddr_bits_per_rank_get( + void __iomem *ddr_ctrl_regs_base); + +/* *INDENT-OFF* */ +#ifdef __cplusplus +} +#endif +/* *INDENT-ON* */ +/** @} end of DDR group */ +#endif + diff --git a/target/linux/alpine/files/drivers/edac/al/al_hal_ddr_cfg.h b/target/linux/alpine/files/drivers/edac/al/al_hal_ddr_cfg.h new file mode 100644 index 00000000000000..d7e09ed430b95b --- /dev/null +++ b/target/linux/alpine/files/drivers/edac/al/al_hal_ddr_cfg.h @@ -0,0 +1,50 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ +#ifndef __AL_HAL_DDR_CFG_H__ +#define __AL_HAL_DDR_CFG_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* The number of byte lanes (including ECC) */ +#define AL_DDR_PHY_NUM_BYTE_LANES 9 + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/target/linux/alpine/files/drivers/edac/al/al_hal_ddr_ctrl_regs.h b/target/linux/alpine/files/drivers/edac/al/al_hal_ddr_ctrl_regs.h new file mode 100644 index 00000000000000..0191b97e8ac577 --- /dev/null +++ b/target/linux/alpine/files/drivers/edac/al/al_hal_ddr_ctrl_regs.h @@ -0,0 +1,1335 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*******************************************************************************/ + +/** + * @addtogroup groupddr + * + * @{ + * @file al_hal_ddr_ctrl_regs.h + * + * @brief DDR controller registers + * + */ + +#ifndef __AL_HAL_DDR_CTRL_REGS_H__ +#define __AL_HAL_DDR_CTRL_REGS_H__ + +#ifdef __cplusplus +extern "C" { +#endif +/* +* Unit Registers +*/ +struct al_dwc_ddr_umctl2_regs { + uint32_t mstr; /* Master Register */ + uint32_t stat; /* Operating Mode Status Regi ... */ + uint32_t rsrvd_0[2]; + uint32_t mrctrl0; /* Mode Register Read/Write C ... */ + uint32_t mrctrl1; /* Mode Register Read/Write C ... */ + uint32_t mrstat; /* Mode Register Read/Write S ... */ + uint32_t rsrvd_1[5]; + uint32_t pwrctl; /* Low Power Control Register ... */ + uint32_t pwrtmg; /* Low Power Timing Register */ + uint32_t rsrvd_2[6]; + uint32_t rfshctl0; /* Refresh Control Register 0 ... */ + uint32_t rfshctl1; /* Refresh Control Register 1 ... */ + uint32_t rfshctl2; /* Refresh Control Register 2 ... */ + uint32_t rsrvd_3; + uint32_t rfshctl3; /* Refresh Control Register 3 ... */ + uint32_t rfshtmg; /* Refresh Timing Register */ + uint32_t rsrvd_4[2]; + uint32_t ecccfg0; /* ECC Configuration Register ... */ + uint32_t ecccfg1; /* ECC Configuration Register ... */ + uint32_t eccstat; /* ECC Status Register */ + uint32_t eccclr; /* ECC Clear Register */ + uint32_t eccerrcnt; /* ECC Error Counter Register ... */ + uint32_t ecccaddr0; /* ECC Corrected Error Addres ... */ + uint32_t ecccaddr1; /* ECC Corrected Error Addres ... */ + uint32_t ecccsyn0; /* ECC Corrected Syndrome Reg ... */ + uint32_t ecccsyn1; /* ECC Corrected Syndrome Reg ... */ + uint32_t ecccsyn2; /* ECC Corrected Syndrome Reg ... */ + uint32_t eccbitmask0; /* ECC Corrected Data Bit Mas ... */ + uint32_t eccbitmask1; /* ECC Corrected Data Bit Mas ... */ + uint32_t eccbitmask2; /* ECC Corrected Data Bit Mas ... */ + uint32_t eccuaddr0; /* ECC Uncorrected Error Addr ... */ + uint32_t eccuaddr1; /* ECC Uncorrected Error Addre ... */ + uint32_t eccusyn0; /* ECC Uncorrected Syndrome Re ... */ + uint32_t eccusyn1; /* ECC Uncorrected Syndrome R ... */ + uint32_t eccusyn2; /* ECC Uncorrected Syndrome R ... */ + uint32_t eccpoisonaddr0; /* ECC Data Poisoning Address ... */ + uint32_t eccpoisonaddr1; /* ECC Data Poisoning Address ... */ + uint32_t parctl; /* Parity Control Register */ + uint32_t parstat; /* Parity Status Register */ + uint32_t rsrvd_5[2]; + uint32_t init0; /* SDRAM Initialization Regis ... */ + uint32_t init1; /* SDRAM Initialization Regis ... */ + uint32_t rsrvd_6; + uint32_t init3; /* SDRAM Initialization Regis ... */ + uint32_t init4; /* SDRAM Initialization Regis ... */ + uint32_t init5; /* SDRAM Initialization Regis ... 
*/ + uint32_t rsrvd_7[2]; + uint32_t dimmctl; /* DIMM Control Register */ + uint32_t rankctl; /* Rank Control Register */ + uint32_t rsrvd_8[2]; + uint32_t dramtmg0; /* SDRAM Timing Register 0 */ + uint32_t dramtmg1; /* SDRAM Timing Register 1 */ + uint32_t dramtmg2; /* SDRAM Timing Register 2 */ + uint32_t dramtmg3; /* SDRAM Timing Register 3 */ + uint32_t dramtmg4; /* SDRAM Timing Register 4 */ + uint32_t dramtmg5; /* SDRAM Timing Register 5 */ + uint32_t rsrvd_9[2]; + uint32_t dramtmg8; /* SDRAM Timing Register 8 */ + uint32_t rsrvd_10[23]; + uint32_t zqctl0; /* ZQ Control Register 0 */ + uint32_t zqctl1; /* ZQ Control Register 1 */ + uint32_t rsrvd_11[2]; + uint32_t dfitmg0; /* DFI Timing Register 0 */ + uint32_t dfitmg1; /* DFI Timing Register 1 */ + uint32_t rsrvd_12[2]; + uint32_t dfiupd0; /* DFI Update Register 0 */ + uint32_t dfiupd1; /* DFI Update Register 1 */ + uint32_t dfiupd2; /* DFI Update Register 2 */ + uint32_t dfiupd3; /* DFI Update Register 3 */ + uint32_t dfimisc; /* DFI Miscellaneous Control ... */ + uint32_t rsrvd_13[19]; + uint32_t addrmap0; /* Address Map Register 0 */ + uint32_t addrmap1; /* Address Map Register 1 */ + uint32_t addrmap2; /* Address Map Register 2 */ + uint32_t addrmap3; /* Address Map Register 3 */ + uint32_t addrmap4; /* Address Map Register 4 */ + uint32_t addrmap5; /* Address Map Register 5 */ + uint32_t addrmap6; /* Address Map Register 6 */ + uint32_t rsrvd_14[9]; + uint32_t odtcfg; /* ODT Configuration Register ... */ + uint32_t odtmap; /* ODT/Rank Map Register */ + uint32_t rsrvd_15[2]; + uint32_t sched; /* Scheduler Control Register ... */ + uint32_t rsrvd_16; + uint32_t perfhpr0; /* High Priority Read CAM Reg ... */ + uint32_t perfhpr1; /* High Priority Read CAM Reg ... */ + uint32_t perflpr0; /* Low Priority Read CAM Regi ... */ + uint32_t perflpr1; /* Low Priority Read CAM Regi ... */ + uint32_t perfwr0; /* Write CAM Register 0 */ + uint32_t perfwr1; /* Write CAM Register 1 */ + uint32_t rsrvd_17[36]; + uint32_t dbg0; /* Debug Register 0 */ + uint32_t dbg1; /* Debug Register 1 */ + uint32_t dbgcam; /* CAM Debug Register */ + uint32_t rsrvd[61]; +}; +struct al_dwc_ddr_umctl2_mp { + uint32_t pccfg; /* Port Common Configuration ... */ + uint32_t pcfgr_0; /* Port 0 Configuration Read ... */ + uint32_t pcfgw_0; /* Port 0 Configuration Write ... */ + uint32_t pcfgidmaskch0_0; /* Port 0 Channel 0 Configura ... */ + uint32_t pcfgidvaluech0_0; /* Port 0 Channel 0 Configura ... */ + uint32_t rsrvd[1787]; +}; + +struct al_ddr_ctrl_regs { + struct al_dwc_ddr_umctl2_regs umctl2_regs; + struct al_dwc_ddr_umctl2_mp umctl2_mp; +}; + + +/* +* Registers Fields +*/ + + +/**** MSTR register ****/ +/* Select DDR3 SDRAM - 1 - DDR3 operating mode - 0 - DDR2 opera ... */ +#define DWC_DDR_UMCTL2_REGS_MSTR_DDR3 (1 << 0) + +#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED_MASK 0x000000FE +#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED_SHIFT 1 +/* Indicates burst mode */ +#define DWC_DDR_UMCTL2_REGS_MSTR_BURST_MODE (1 << 8) +/* When set, enable burst-chop in DDR3. */ +#define DWC_DDR_UMCTL2_REGS_MSTR_BURSTCHOP (1 << 9) +/* If 1, then uMCTL2 uses 2T timing */ +#define DWC_DDR_UMCTL2_REGS_MSTR_EN_2T_TIMING_MODE (1 << 10) + +#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED1 (1 << 11) +/* Selects proportion of DQ bus width that is used by the SDRAM ... 
*/ +#define DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_MASK 0x00003000 +#define DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_SHIFT 12 + +#define DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_64 \ + (0 << DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_SHIFT) +#define DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_32 \ + (1 << DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_SHIFT) + +#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED2_MASK 0x0000C000 +#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED2_SHIFT 14 +/* SDRAM burst length used: - 0001 - Burst length of 2 (only su ... */ +#define DWC_DDR_UMCTL2_REGS_MSTR_BURST_RDWR_MASK 0x000F0000 +#define DWC_DDR_UMCTL2_REGS_MSTR_BURST_RDWR_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED3_MASK 0x00F00000 +#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED3_SHIFT 20 +/* Only present for multi-rank configurations */ +#define DWC_DDR_UMCTL2_REGS_MSTR_ACTIVE_RANKS_MASK 0x0F000000 +#define DWC_DDR_UMCTL2_REGS_MSTR_ACTIVE_RANKS_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED4_MASK 0xF0000000 +#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED4_SHIFT 28 + +/**** STAT register ****/ +/* Operating mode */ +#define DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_MASK 0x00000003 +#define DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_INIT \ + (0 << DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_SHIFT) +#define DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_NORMAL \ + (1 << DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_SHIFT) +#define DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_POWER_DOWN \ + (2 << DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_SHIFT) +#define DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_SELF_REFRESH \ + (3 << DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_SHIFT) + +#define DWC_DDR_UMCTL2_REGS_STAT_RESERVED_MASK 0xFFFFFFFC +#define DWC_DDR_UMCTL2_REGS_STAT_RESERVED_SHIFT 2 + +/**** MRCTRL0 register ****/ + +#define DWC_DDR_UMCTL2_REGS_MRCTRL0_RESERVED_MASK 0x0000000F +#define DWC_DDR_UMCTL2_REGS_MRCTRL0_RESERVED_SHIFT 0 +/* Controls which rank is accessed by MRCTRL0 */ +#define DWC_DDR_UMCTL2_REGS_MRCTRL0_MR_RANK_MASK 0x000000F0 +#define DWC_DDR_UMCTL2_REGS_MRCTRL0_MR_RANK_SHIFT 4 + +#define DWC_DDR_UMCTL2_REGS_MRCTRL0_RESERVED1_MASK 0x00000F00 +#define DWC_DDR_UMCTL2_REGS_MRCTRL0_RESERVED1_SHIFT 8 +/* Address of the mode register that is to be written to */ +#define DWC_DDR_UMCTL2_REGS_MRCTRL0_MR_ADDR_MASK 0x00007000 +#define DWC_DDR_UMCTL2_REGS_MRCTRL0_MR_ADDR_SHIFT 12 + +#define DWC_DDR_UMCTL2_REGS_MRCTRL0_RESERVED2_MASK 0x7FFF8000 +#define DWC_DDR_UMCTL2_REGS_MRCTRL0_RESERVED2_SHIFT 15 +/* Setting this register bit to 1 triggers a mode register read ... */ +#define DWC_DDR_UMCTL2_REGS_MRCTRL0_MR_WR (1 << 31) + +/**** MRCTRL1 register ****/ +/* Mode register write data for all non-LPDDR2 modes */ +#define DWC_DDR_UMCTL2_REGS_MRCTRL1_MR_DATA_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_MRCTRL1_MR_DATA_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_MRCTRL1_RESERVED_MASK 0xFFFF0000 +#define DWC_DDR_UMCTL2_REGS_MRCTRL1_RESERVED_SHIFT 16 + +/**** MRSTAT register ****/ +/* SoC core may initiate a MR write operation only if this signa ... */ +#define DWC_DDR_UMCTL2_REGS_MRSTAT_MR_WR_BUSY (1 << 0) + +#define DWC_DDR_UMCTL2_REGS_MRSTAT_RESERVED_MASK 0xFFFFFFFE +#define DWC_DDR_UMCTL2_REGS_MRSTAT_RESERVED_SHIFT 1 + +/**** PWRCTL register ****/ +/* If set, then the uMCTL2 puts the SDRAM into self refresh when ... */ +#define DWC_DDR_UMCTL2_REGS_PWRCTL_SELFREF_EN (1 << 0) +/* If true then the uMCTL2 goes into power-down after a programm ... 
*/ +#define DWC_DDR_UMCTL2_REGS_PWRCTL_POWERDOWN_EN (1 << 1) + +#define DWC_DDR_UMCTL2_REGS_PWRCTL_RESERVED (1 << 2) +/* Enable the assertion of dfi_dram_clk_disable whenever a clock ... */ +#define DWC_DDR_UMCTL2_REGS_PWRCTL_EN_DFI_DRAM_CLK_DISABLE (1 << 3) + +#define DWC_DDR_UMCTL2_REGS_PWRCTL_RESERVED1_MASK 0xFFFFFFF0 +#define DWC_DDR_UMCTL2_REGS_PWRCTL_RESERVED1_SHIFT 4 + +/**** PWRTMG register ****/ +/* After this many clocks of NOP or deselect the uMCTL2 puts the ... */ +#define DWC_DDR_UMCTL2_REGS_PWRTMG_POWERDOWN_TO_X32_MASK 0x0000001F +#define DWC_DDR_UMCTL2_REGS_PWRTMG_POWERDOWN_TO_X32_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_PWRTMG_RESERVED_MASK 0xFFFFFFE0 +#define DWC_DDR_UMCTL2_REGS_PWRTMG_RESERVED_SHIFT 5 + +/**** RFSHCTL0 register ****/ + +#define DWC_DDR_UMCTL2_REGS_RFSHCTL0_RESERVED_MASK 0x000000FF +#define DWC_DDR_UMCTL2_REGS_RFSHCTL0_RESERVED_SHIFT 0 +/* The programmed value + 1 is the number of refresh timeouts th ... */ +#define DWC_DDR_UMCTL2_REGS_RFSHCTL0_REFRESH_BURST_MASK 0x00000700 +#define DWC_DDR_UMCTL2_REGS_RFSHCTL0_REFRESH_BURST_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_RFSHCTL0_REFRESH_BURST(n) \ + (((n) - 1) << DWC_DDR_UMCTL2_REGS_RFSHCTL0_REFRESH_BURST_SHIFT) + +#define DWC_DDR_UMCTL2_REGS_RFSHCTL0_RESERVED1 (1 << 11) +/* If the refresh timer (tRFCnom, also known as tREFI) ... */ + +/**** RFSHTMG register ****/ +/* tRFC (min): Minimum time from refresh to refresh ... */ +#define DWC_DDR_UMCTL2_REGS_RFSHTMG_T_RFC_MIN_MASK 0x000001FF +#define DWC_DDR_UMCTL2_REGS_RFSHTMG_T_RFC_MIN_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_RFSHTMG_RESERVED_MASK 0x0000FE00 +#define DWC_DDR_UMCTL2_REGS_RFSHTMG_RESERVED_SHIFT 9 +/* tREFI: Average time interval between refreshes per ... */ +#define DWC_DDR_UMCTL2_REGS_RFSHTMG_T_RFC_NOM_X32_MASK 0x0FFF0000 +#define DWC_DDR_UMCTL2_REGS_RFSHTMG_T_RFC_NOM_X32_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_RFSHTMG_RESERVED1_MASK 0xF0000000 +#define DWC_DDR_UMCTL2_REGS_RFSHTMG_RESERVED1_SHIFT 28 + +/**** ECCCFG0 register ****/ +/* ECC mode indicator */ +#define DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_MASK 0x00000007 +#define DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_DIS \ + (0 << DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_SHIFT) +#define DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_EN \ + (4 << DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_SHIFT) + +#define DWC_DDR_UMCTL2_REGS_ECCCFG0_RESERVED (1 << 3) +/* Disable ECC scrubs */ +#define DWC_DDR_UMCTL2_REGS_ECCCFG0_DIS_SCRUB (1 << 4) + +#define DWC_DDR_UMCTL2_REGS_ECCCFG0_RESERVED1_MASK 0xFFFFFFE0 +#define DWC_DDR_UMCTL2_REGS_ECCCFG0_RESERVED1_SHIFT 5 + +/**** ECCCFG1 register ****/ +/* Enable ECC data poisoning - introduces ECC errors on writes t ... 
*/ +#define DWC_DDR_UMCTL2_REGS_ECCCFG1_DATA_POISON (1 << 0) + +#define DWC_DDR_UMCTL2_REGS_ECCCFG1_RESERVED_MASK 0xFFFFFFFE +#define DWC_DDR_UMCTL2_REGS_ECCCFG1_RESERVED_SHIFT 1 + +/**** ECCSTAT register ****/ +/* Bit number corrected by single-bit ECC error */ +#define DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_CORRECTED_BIT_NUM_MASK 0x0000007F +#define DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_CORRECTED_BIT_NUM_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ECCSTAT_RESERVED (1 << 7) +/* Single-bit error indicators, 1 per ECC lane */ +#define DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_CORRECTED_ERR_MASK 0x00000300 +#define DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_CORRECTED_ERR_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_ECCSTAT_RESERVED1_MASK 0x0000FC00 +#define DWC_DDR_UMCTL2_REGS_ECCSTAT_RESERVED1_SHIFT 10 +/* Double-bit error indicators, 1 per ECC lane */ +#define DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_UNCORRECTED_ERR_MASK 0x00030000 +#define DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_UNCORRECTED_ERR_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_ECCSTAT_RESERVED2_MASK 0xFFFC0000 +#define DWC_DDR_UMCTL2_REGS_ECCSTAT_RESERVED2_SHIFT 18 + +/**** ECCCLR register ****/ +/* Setting this register bit to 1 clears the currently stored co ... */ +#define DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_CORR_ERR (1 << 0) +/* Setting this register bit to 1 clears the currently stored un ... */ +#define DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_UNCORR_ERR (1 << 1) +/* Setting this register bit to 1 clears the currently stored co ... */ +#define DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_CORR_ERR_CNT (1 << 2) +/* Setting this register bit to 1 clears the currently stored un ... */ +#define DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_UNCORR_ERR_CNT (1 << 3) + +#define DWC_DDR_UMCTL2_REGS_ECCCLR_RESERVED_MASK 0xFFFFFFF0 +#define DWC_DDR_UMCTL2_REGS_ECCCLR_RESERVED_SHIFT 4 + +/**** ECCERRCNT register ****/ +/* Number of correctable ECC errors detected */ +#define DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_CORR_ERR_CNT_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_CORR_ERR_CNT_SHIFT 0 +/* Number of uncorrectable ECC errors detected */ +#define DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_UNCORR_ERR_CNT_MASK 0xFFFF0000 +#define DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_UNCORR_ERR_CNT_SHIFT 16 + +/**** ECCCADDR0 register ****/ +/* Page/row number of a read resulting in a corrected ECC error */ +#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_ROW_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_ROW_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_RESERVED_MASK 0x00FF0000 +#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_RESERVED_SHIFT 16 +/* Rank number of a read resulting in a corrected ECC error */ +#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_RANK_MASK 0x03000000 +#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_RANK_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_RESERVED1_MASK 0xFC000000 +#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_RESERVED1_SHIFT 26 + +/**** ECCCADDR1 register ****/ +/* Block number of a read resulting in a corrected ECC error (lo ... 
*/ +#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_COL_MASK 0x00000FFF +#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_COL_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_RESERVED_MASK 0x0000F000 +#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_RESERVED_SHIFT 12 +/* Bank number of a read resulting in a corrected ECC error */ +#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_BANK_MASK 0x00070000 +#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_BANK_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_RESERVED1_MASK 0xFFF80000 +#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_RESERVED1_SHIFT 19 + +/**** ECCCSYN2 register ****/ +/* Data pattern that resulted in a corrected error one for each ... */ +#define DWC_DDR_UMCTL2_REGS_ECCCSYN2_ECC_CORR_SYNDROMES_71_64_MASK 0x000000FF +#define DWC_DDR_UMCTL2_REGS_ECCCSYN2_ECC_CORR_SYNDROMES_71_64_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ECCCSYN2_RESERVED_MASK 0xFFFFFF00 +#define DWC_DDR_UMCTL2_REGS_ECCCSYN2_RESERVED_SHIFT 8 + +/**** ECCBITMASK2 register ****/ +/* Mask for the corrected data portion - 1 on any bit indicat ... */ +#define DWC_DDR_UMCTL2_REGS_ECCBITMASK2_ECC_CORR_BIT_MASK_71_64_MASK 0x000000FF +#define DWC_DDR_UMCTL2_REGS_ECCBITMASK2_ECC_CORR_BIT_MASK_71_64_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ECCBITMASK2_RESERVED_MASK 0xFFFFFF00 +#define DWC_DDR_UMCTL2_REGS_ECCBITMASK2_RESERVED_SHIFT 8 + +/**** ECCUADDR0 register ****/ +/* Page/row number of a read resulting in an uncorrected ECC err ... */ +#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_ROW_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_ROW_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_RESERVED_MASK 0x00FF0000 +#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_RESERVED_SHIFT 16 +/* Rank number of a read resulting in an uncorrected ECC error */ +#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_RANK_MASK 0x03000000 +#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_RANK_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_RESERVED1_MASK 0xFC000000 +#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_RESERVED1_SHIFT 26 + +/**** ECCUADDR1 register ****/ +/* Block number of a read resulting in an uncorrected ECC error ... */ +#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_COL_MASK 0x00000FFF +#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_COL_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_RESERVED_MASK 0x0000F000 +#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_RESERVED_SHIFT 12 +/* Bank number of a read resulting in an uncorrected ECC error */ +#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_BANK_MASK 0x00070000 +#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_BANK_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_RESERVED1_MASK 0xFFF80000 +#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_RESERVED1_SHIFT 19 + +/**** ECCUSYN2 register ****/ +/* Data pattern that resulted in an uncorrected error one for ea ... 
*/ +#define DWC_DDR_UMCTL2_REGS_ECCUSYN2_ECC_UNCORR_SYNDROMES_71_64_MASK 0x000000FF +#define DWC_DDR_UMCTL2_REGS_ECCUSYN2_ECC_UNCORR_SYNDROMES_71_64_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ECCUSYN2_RESERVED_MASK 0xFFFFFF00 +#define DWC_DDR_UMCTL2_REGS_ECCUSYN2_RESERVED_SHIFT 8 + +/**** ECCPOISONADDR0 register ****/ +/* Column address for ECC poisoning */ +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_ECC_POISON_COL_MASK 0x00000FFF +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_ECC_POISON_COL_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_RESERVED_MASK 0x00FFF000 +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_RESERVED_SHIFT 12 +/* Rank address for ECC poisoning */ +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_ECC_POISON_RANK_MASK 0x03000000 +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_ECC_POISON_RANK_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_RESERVED1_MASK 0xFC000000 +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_RESERVED1_SHIFT 26 + +/**** ECCPOISONADDR1 register ****/ +/* Row address for ECC poisoning */ +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_ECC_POISON_ROW_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_ECC_POISON_ROW_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_RESERVED_MASK 0x00FF0000 +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_RESERVED_SHIFT 16 +/* Bank address for ECC poisoning */ +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_ECC_POISON_BANK_MASK 0x07000000 +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_ECC_POISON_BANK_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_RESERVED1_MASK 0xF8000000 +#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_RESERVED1_SHIFT 27 + +/**** PARCTL register ****/ +/* Interrupt enable bit for DFI parity error */ +#define DWC_DDR_UMCTL2_REGS_PARCTL_DFI_PARITY_ERR_INT_EN (1 << 0) +/* Interrupt clear bit for DFI parity error */ +#define DWC_DDR_UMCTL2_REGS_PARCTL_DFI_PARITY_ERR_INT_CLR (1 << 1) +/* DFI parity error count clear */ +#define DWC_DDR_UMCTL2_REGS_PARCTL_DFI_PARITY_ERR_CNT_CLR (1 << 2) + +#define DWC_DDR_UMCTL2_REGS_PARCTL_RESERVED_MASK 0xFFFFFFF8 +#define DWC_DDR_UMCTL2_REGS_PARCTL_RESERVED_SHIFT 3 + +/**** PARSTAT register ****/ +/* DFI parity error count */ +#define DWC_DDR_UMCTL2_REGS_PARSTAT_DFI_PARITY_ERR_CNT_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_PARSTAT_DFI_PARITY_ERR_CNT_SHIFT 0 +/* DFI parity error interrupt */ +#define DWC_DDR_UMCTL2_REGS_PARSTAT_DFI_PARITY_ERR_INT (1 << 16) + +#define DWC_DDR_UMCTL2_REGS_PARSTAT_RESERVED_MASK 0xFFFE0000 +#define DWC_DDR_UMCTL2_REGS_PARSTAT_RESERVED_SHIFT 17 + +/**** INIT0 register ****/ +/* Cycles to wait after reset before driving CKE high to start t ... */ +#define DWC_DDR_UMCTL2_REGS_INIT0_PRE_CKE_X1024_MASK 0x000003FF +#define DWC_DDR_UMCTL2_REGS_INIT0_PRE_CKE_X1024_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_INIT0_RESERVED_MASK 0x0000FC00 +#define DWC_DDR_UMCTL2_REGS_INIT0_RESERVED_SHIFT 10 +/* Cycles to wait after driving CKE high to start the SDRAM init ... 
*/ +#define DWC_DDR_UMCTL2_REGS_INIT0_POST_CKE_X1024_MASK 0x03FF0000 +#define DWC_DDR_UMCTL2_REGS_INIT0_POST_CKE_X1024_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_INIT0_RESERVED1_MASK 0xFC000000 +#define DWC_DDR_UMCTL2_REGS_INIT0_RESERVED1_SHIFT 26 + +/**** INIT1 register ****/ +/* Wait period before driving the OCD complete command to SDRAM */ +#define DWC_DDR_UMCTL2_REGS_INIT1_PRE_OCD_X32_MASK 0x0000000F +#define DWC_DDR_UMCTL2_REGS_INIT1_PRE_OCD_X32_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_INIT1_RESERVED_MASK 0x000000F0 +#define DWC_DDR_UMCTL2_REGS_INIT1_RESERVED_SHIFT 4 +/* Cycles to wait after completing the SDRAM initialization sequ ... */ +#define DWC_DDR_UMCTL2_REGS_INIT1_FINAL_WAIT_X32_MASK 0x00007F00 +#define DWC_DDR_UMCTL2_REGS_INIT1_FINAL_WAIT_X32_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_INIT1_RESERVED1 (1 << 15) +/* Number of cycles to assert SDRAM reset signal during init seq ... */ +#define DWC_DDR_UMCTL2_REGS_INIT1_DRAM_RSTN_X1024_MASK 0x00FF0000 +#define DWC_DDR_UMCTL2_REGS_INIT1_DRAM_RSTN_X1024_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_INIT1_RESERVED2_MASK 0xFF000000 +#define DWC_DDR_UMCTL2_REGS_INIT1_RESERVED2_SHIFT 24 + +/**** INIT3 register ****/ +/* Non LPDDR2-Value to be loaded into SDRAM EMR registers */ +#define DWC_DDR_UMCTL2_REGS_INIT3_EMR_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_INIT3_EMR_SHIFT 0 +/* Non LPDDR2-Value to be loaded into the SDRAM Mode register */ +#define DWC_DDR_UMCTL2_REGS_INIT3_MR_MASK 0xFFFF0000 +#define DWC_DDR_UMCTL2_REGS_INIT3_MR_SHIFT 16 + +/**** INIT4 register ****/ +/* Non LPDDR2- Value to be loaded into SDRAM EMR3 registers */ +#define DWC_DDR_UMCTL2_REGS_INIT4_EMR3_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_INIT4_EMR3_SHIFT 0 +/* Non LPDDR2- Value to be loaded into SDRAM EMR2 registers */ +#define DWC_DDR_UMCTL2_REGS_INIT4_EMR2_MASK 0xFFFF0000 +#define DWC_DDR_UMCTL2_REGS_INIT4_EMR2_SHIFT 16 + +/**** INIT5 register ****/ + +#define DWC_DDR_UMCTL2_REGS_INIT5_RESERVED_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_INIT5_RESERVED_SHIFT 0 +/* ZQ initial calibration, tZQINIT */ +#define DWC_DDR_UMCTL2_REGS_INIT5_DEV_ZQINIT_X32_MASK 0x00FF0000 +#define DWC_DDR_UMCTL2_REGS_INIT5_DEV_ZQINIT_X32_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_INIT5_RESERVED1_MASK 0xFF000000 +#define DWC_DDR_UMCTL2_REGS_INIT5_RESERVED1_SHIFT 24 + +/**** DIMMCTL register ****/ +/* Staggering enable for multi-rank accesses (for multi-rank UDI ... */ +#define DWC_DDR_UMCTL2_REGS_DIMMCTL_DIMM_STAGGER_CS_EN (1 << 0) +/* Address Mirroring Enable (for multi-rank UDIMM implementation ... */ +#define DWC_DDR_UMCTL2_REGS_DIMMCTL_DIMM_ADDR_MIRR_EN (1 << 1) + +#define DWC_DDR_UMCTL2_REGS_DIMMCTL_RESERVED_MASK 0xFFFFFFFC +#define DWC_DDR_UMCTL2_REGS_DIMMCTL_RESERVED_SHIFT 2 + +/**** RANKCTL register ****/ +/* Only present for multi-rank configurations */ +#define DWC_DDR_UMCTL2_REGS_RANKCTL_MAX_RANK_RD_MASK 0x0000000F +#define DWC_DDR_UMCTL2_REGS_RANKCTL_MAX_RANK_RD_SHIFT 0 +/* Only present for multi-rank configurations */ +#define DWC_DDR_UMCTL2_REGS_RANKCTL_DIFF_RANK_RD_GAP_MASK 0x000000F0 +#define DWC_DDR_UMCTL2_REGS_RANKCTL_DIFF_RANK_RD_GAP_SHIFT 4 +/* Only present for multi-rank configurations */ +#define DWC_DDR_UMCTL2_REGS_RANKCTL_DIFF_RANK_WR_GAP_MASK 0x00000F00 +#define DWC_DDR_UMCTL2_REGS_RANKCTL_DIFF_RANK_WR_GAP_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_RANKCTL_RESERVED_MASK 0xFFFFF000 +#define DWC_DDR_UMCTL2_REGS_RANKCTL_RESERVED_SHIFT 12 + +/**** DRAMTMG0 register ****/ +/* tRAS(min): Minimum time between activate and prec ... 
*/ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_T_RAS_MIN_MASK 0x0000003F +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_T_RAS_MIN_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED_MASK 0x000000C0 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED_SHIFT 6 +/* tRAS(max): Maximum time between activate and prec ... */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_T_RAS_MAX_MASK 0x00003F00 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_T_RAS_MAX_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED1_MASK 0x0000C000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED1_SHIFT 14 +/* tFAW Valid only when 8 banks are present */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_T_FAW_MASK 0x003F0000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_T_FAW_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED2_MASK 0x00C00000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED2_SHIFT 22 +/* Minimum time between write and precharge to same bank */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_WR2PRE_MASK 0x3F000000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_WR2PRE_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED3_MASK 0xC0000000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED3_SHIFT 30 + +/**** DRAMTMG1 register ****/ +/* tRC: Minimum time between activates to same bank */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_T_RC_MASK 0x0000003F +#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_T_RC_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RESERVED_MASK 0x000000C0 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RESERVED_SHIFT 6 +/* tRTP: Minimum time from read to precharge of same ... */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RD2PRE_MASK 0x00001F00 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RD2PRE_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RESERVED1_MASK 0x0000E000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RESERVED1_SHIFT 13 +/* tXP: Minimum time after power-down exit to any ope ... */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_T_XP_MASK 0x001F0000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_T_XP_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RESERVED2_MASK 0xFFE00000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RESERVED2_SHIFT 21 + +/**** DRAMTMG2 register ****/ +/* WL + BL/2 + tWTR
Minimum time from write comman ... */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG2_WR2RD_MASK 0x0000003F +#define DWC_DDR_UMCTL2_REGS_DRAMTMG2_WR2RD_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG2_RESERVED_MASK 0x000000C0 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG2_RESERVED_SHIFT 6 +/* DDR2/3/mDDR: RL + BL/2 + 2 - WL
 LPDDR2: RL + BL/2 + RU(t ... */ + +/**** DRAMTMG3 register ****/ +/* tMOD: Present if MEMC_DDR3 = 1 only */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_T_MOD_MASK 0x000003FF +#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_T_MOD_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_RESERVED_MASK 0x00000C00 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_RESERVED_SHIFT 10 +/* tMRD: Cycles between load mode commands */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_T_MRD_MASK 0x00007000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_T_MRD_SHIFT 12 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_RESERVED1_MASK 0xFFFF8000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_RESERVED1_SHIFT 15 + +/**** DRAMTMG4 register ****/ +/* tRP: Minimum time from precharge to activate of s ... */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_RP_MASK 0x0000000F +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_RP_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED_MASK 0x000000F0 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED_SHIFT 4 +/* tRRD: Minimum time between activates from bank "a ... */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_RRD_MASK 0x00000700 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_RRD_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED1_MASK 0x0000F800 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED1_SHIFT 11 +/* tCCD: This is the minimum time between two reads ... */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_CCD_MASK 0x00070000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_CCD_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED2_MASK 0x00F80000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED2_SHIFT 19 +/* tRCD - tAL: Minimum time from activate ... */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_RCD_MASK 0x0F000000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_RCD_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED3_MASK 0xF0000000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED3_SHIFT 28 + +/**** DRAMTMG5 register ****/ +/* Minimum number of cycles of CKE HIGH/LOW during power-down an ... */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKE_MASK 0x0000000F +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKE_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED_MASK 0x000000F0 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED_SHIFT 4 +/* Minimum CKE low width for Self refresh entry to exit timing i ... */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKESR_MASK 0x00003F00 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKESR_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED1_MASK 0x0000C000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED1_SHIFT 14 +/* Specifies the number of DFI clock cycles from the de-assertio ... */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKSRE_MASK 0x000F0000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKSRE_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED2_MASK 0x00F00000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED2_SHIFT 20 +/* This is the time before Self Refresh Exit that CK is maintain ... */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKSRX_MASK 0x0F000000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKSRX_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED3_MASK 0xF0000000 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED3_SHIFT 28 + +/**** DRAMTMG8 register ****/ +/* Minimum time to wait after coming out of self refresh before ... */ +#define DWC_DDR_UMCTL2_REGS_DRAMTMG8_POST_SELFREF_GAP_X32_MASK 0x0000007F +#define DWC_DDR_UMCTL2_REGS_DRAMTMG8_POST_SELFREF_GAP_X32_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_DRAMTMG8_RESERVED_MASK 0xFFFFFF80 +#define DWC_DDR_UMCTL2_REGS_DRAMTMG8_RESERVED_SHIFT 7 + +/**** ZQCTL0 register ****/ +/* tZQCS: Number of cycles of NOP required after a ZQ ... 
*/ +#define DWC_DDR_UMCTL2_REGS_ZQCTL0_T_ZQ_SHORT_NOP_MASK 0x000003FF +#define DWC_DDR_UMCTL2_REGS_ZQCTL0_T_ZQ_SHORT_NOP_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ZQCTL0_RESERVED_MASK 0x0000FC00 +#define DWC_DDR_UMCTL2_REGS_ZQCTL0_RESERVED_SHIFT 10 +/* tZQOPER for DDR3, tZQCL for LPDDR2: Num ... */ +#define DWC_DDR_UMCTL2_REGS_ZQCTL0_T_ZQ_LONG_NOP_MASK 0x03FF0000 +#define DWC_DDR_UMCTL2_REGS_ZQCTL0_T_ZQ_LONG_NOP_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_ZQCTL0_RESERVED1_MASK 0x3C000000 +#define DWC_DDR_UMCTL2_REGS_ZQCTL0_RESERVED1_SHIFT 26 +/* - 1 - Disable issuing of ZQCL command at Self-Refresh exit */ +#define DWC_DDR_UMCTL2_REGS_ZQCTL0_DIS_SRX_ZQCL (1 << 30) +/* - 1 - Disable uMCTL2 generation of ZQCS command */ +#define DWC_DDR_UMCTL2_REGS_ZQCTL0_DIS_AUTO_ZQ (1 << 31) + +/**** ZQCTL1 register ****/ +/* Average interval to wait between automatically issuing ZQCS ( ... */ +#define DWC_DDR_UMCTL2_REGS_ZQCTL1_T_ZQ_SHORT_INTERVAL_X1024_MASK 0x000FFFFF +#define DWC_DDR_UMCTL2_REGS_ZQCTL1_T_ZQ_SHORT_INTERVAL_X1024_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ZQCTL1_RESERVED_MASK 0xFFF00000 +#define DWC_DDR_UMCTL2_REGS_ZQCTL1_RESERVED_SHIFT 20 + +/**** DFITMG0 register ****/ +/* Write latency
Number of clocks from the write command to w ... */ +#define DWC_DDR_UMCTL2_REGS_DFITMG0_WRITE_LATENCY_MASK 0x0000001F +#define DWC_DDR_UMCTL2_REGS_DFITMG0_WRITE_LATENCY_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED_MASK 0x000000E0 +#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED_SHIFT 5 +/* Specifies the number of clock cycles between when dfi_wrdata_ ... */ +#define DWC_DDR_UMCTL2_REGS_DFITMG0_DFI_TPHY_WRDATA_MASK 0x00001F00 +#define DWC_DDR_UMCTL2_REGS_DFITMG0_DFI_TPHY_WRDATA_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED1_MASK 0x0000E000 +#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED1_SHIFT 13 +/* Time from the assertion of a read command on the DFI interfac ... */ +#define DWC_DDR_UMCTL2_REGS_DFITMG0_DFI_T_RDDATA_EN_MASK 0x001F0000 +#define DWC_DDR_UMCTL2_REGS_DFITMG0_DFI_T_RDDATA_EN_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED2_MASK 0x00E00000 +#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED2_SHIFT 21 +/* Specifies the number of DFI clock cycles after an assertion o ... */ +#define DWC_DDR_UMCTL2_REGS_DFITMG0_DFI_T_CTRL_DELAY_MASK 0x0F000000 +#define DWC_DDR_UMCTL2_REGS_DFITMG0_DFI_T_CTRL_DELAY_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED3_MASK 0xF0000000 +#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED3_SHIFT 28 + +/**** DFITMG1 register ****/ +/* Specifies the number of DFI clock cycles from the de-assertio ... */ +#define DWC_DDR_UMCTL2_REGS_DFITMG1_DFI_T_DRAM_CLK_ENABLE_MASK 0x0000000F +#define DWC_DDR_UMCTL2_REGS_DFITMG1_DFI_T_DRAM_CLK_ENABLE_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_DFITMG1_RESERVED_MASK 0x000000F0 +#define DWC_DDR_UMCTL2_REGS_DFITMG1_RESERVED_SHIFT 4 +/* Specifies the number of DFI clock cycles from the assertion o ... */ +#define DWC_DDR_UMCTL2_REGS_DFITMG1_DFI_T_DRAM_CLK_DISABLE_MASK 0x00000F00 +#define DWC_DDR_UMCTL2_REGS_DFITMG1_DFI_T_DRAM_CLK_DISABLE_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_DFITMG1_RESERVED1_MASK 0xFFFFF000 +#define DWC_DDR_UMCTL2_REGS_DFITMG1_RESERVED1_SHIFT 12 + +/**** DFIUPD0 register ****/ +/* Specifies the minimum number of clock cycles that the dfi_ctr ... */ +#define DWC_DDR_UMCTL2_REGS_DFIUPD0_DFI_T_CTRLUP_MIN_MASK 0x000003FF +#define DWC_DDR_UMCTL2_REGS_DFIUPD0_DFI_T_CTRLUP_MIN_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_DFIUPD0_RESERVED_MASK 0x0000FC00 +#define DWC_DDR_UMCTL2_REGS_DFIUPD0_RESERVED_SHIFT 10 +/* Specifies the maximum number of clock cycles that the dfi_ctr ... */ +#define DWC_DDR_UMCTL2_REGS_DFIUPD0_DFI_T_CTRLUP_MAX_MASK 0x03FF0000 +#define DWC_DDR_UMCTL2_REGS_DFIUPD0_DFI_T_CTRLUP_MAX_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_DFIUPD0_RESERVED1_MASK 0x7C000000 +#define DWC_DDR_UMCTL2_REGS_DFIUPD0_RESERVED1_SHIFT 26 +/* When '1', disable co_gs_dll_calib generated by the uMCTL2 */ +#define DWC_DDR_UMCTL2_REGS_DFIUPD0_DIS_DLL_CALIB (1 << 31) + +/**** DFIUPD1 register ****/ +/* This is the maximum amount of time between uMCTL2 initiated D ... */ +#define DWC_DDR_UMCTL2_REGS_DFIUPD1_DFI_T_CTRLUPD_INTERVAL_MAX_X1024_MASK 0x000000FF +#define DWC_DDR_UMCTL2_REGS_DFIUPD1_DFI_T_CTRLUPD_INTERVAL_MAX_X1024_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_DFIUPD1_RESERVED_MASK 0x0000FF00 +#define DWC_DDR_UMCTL2_REGS_DFIUPD1_RESERVED_SHIFT 8 +/* This is the minimum amount of time between uMCTL2 initiated D ... 
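*/
+
+/*
+ * Illustrative aside, not part of the original header: the *_X1024 fields
+ * here count units of 1024 controller clock cycles, so a programmed value
+ * of 8 means 8 * 1024 = 8192 clocks (10.24 us at an assumed 800 MHz
+ * controller clock). A hedged sketch of the conversion, where interval_clks
+ * is an assumed input:
+ *
+ *	// round a cycle count up to the next multiple of 1024
+ *	uint32_t interval_x1024 = (interval_clks + 1023) / 1024;
+ */
+
+/*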
*/ +#define DWC_DDR_UMCTL2_REGS_DFIUPD1_DFI_T_CTRLUPD_INTERVAL_MIN_X1024_MASK 0x00FF0000 +#define DWC_DDR_UMCTL2_REGS_DFIUPD1_DFI_T_CTRLUPD_INTERVAL_MIN_X1024_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_DFIUPD1_RESERVED1_MASK 0xFF000000 +#define DWC_DDR_UMCTL2_REGS_DFIUPD1_RESERVED1_SHIFT 24 + +/**** DFIUPD2 register ****/ +/* Specifies the maximum number of DFI clock cycles that the dfi ... */ +#define DWC_DDR_UMCTL2_REGS_DFIUPD2_DFI_PHYUPD_TYPE0_MASK 0x00000FFF +#define DWC_DDR_UMCTL2_REGS_DFIUPD2_DFI_PHYUPD_TYPE0_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_DFIUPD2_RESERVED_MASK 0x0000F000 +#define DWC_DDR_UMCTL2_REGS_DFIUPD2_RESERVED_SHIFT 12 +/* Specifies the maximum number of DFI clock cycles that the dfi ... */ +#define DWC_DDR_UMCTL2_REGS_DFIUPD2_DFI_PHYUPD_TYPE1_MASK 0x0FFF0000 +#define DWC_DDR_UMCTL2_REGS_DFIUPD2_DFI_PHYUPD_TYPE1_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_DFIUPD2_RESERVED1_MASK 0x70000000 +#define DWC_DDR_UMCTL2_REGS_DFIUPD2_RESERVED1_SHIFT 28 +/* Enables the support for acknowledging PHY-initiated updates: ... */ +#define DWC_DDR_UMCTL2_REGS_DFIUPD2_DFI_PHYUPD_EN (1 << 31) + +/**** DFIUPD3 register ****/ +/* Specifies the maximum number of DFI clock cycles that the dfi ... */ +#define DWC_DDR_UMCTL2_REGS_DFIUPD3_DFI_PHYUPD_TYPE2_MASK 0x00000FFF +#define DWC_DDR_UMCTL2_REGS_DFIUPD3_DFI_PHYUPD_TYPE2_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_DFIUPD3_RESERVED_MASK 0x0000F000 +#define DWC_DDR_UMCTL2_REGS_DFIUPD3_RESERVED_SHIFT 12 +/* Specifies the maximum number of DFI clock cycles that the dfi ... */ +#define DWC_DDR_UMCTL2_REGS_DFIUPD3_DFI_PHYUPD_TYPE3_MASK 0x0FFF0000 +#define DWC_DDR_UMCTL2_REGS_DFIUPD3_DFI_PHYUPD_TYPE3_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_DFIUPD3_RESERVED1_MASK 0xF0000000 +#define DWC_DDR_UMCTL2_REGS_DFIUPD3_RESERVED1_SHIFT 28 + +/**** DFIMISC register ****/ +/* PHY initialization complete enable signal */ +#define DWC_DDR_UMCTL2_REGS_DFIMISC_DFI_INIT_COMPLETE_EN (1 << 0) + +#define DWC_DDR_UMCTL2_REGS_DFIMISC_RESERVED_MASK 0xFFFFFFFE +#define DWC_DDR_UMCTL2_REGS_DFIMISC_RESERVED_SHIFT 1 + +/* Address mapping bases */ +#define AL_DDR_ADDR_MAP_CS_0_BASE 6 + +#define AL_DDR_ADDR_MAP_CS_DISABLED 31 + +#define AL_DDR_ADDR_MAP_BANK_0_BASE 2 + +#define AL_DDR_ADDR_MAP_BANK_DISABLED 15 + +#define AL_DDR_ADDR_MAP_COL_2_BASE 2 + +#define AL_DDR_ADDR_MAP_COL_DISABLED 15 + +#define AL_DDR_ADDR_MAP_ROW_0_BASE 6 +#define AL_DDR_ADDR_MAP_ROW_11_BASE 17 + +#define AL_DDR_ADDR_MAP_ROW_DISABLED 15 + +#define AL_DDR_ADDR_MAP_OFFSET 4 + +/**** ADDRMAP0 register ****/ +/* Selects the HIF address bit used as rank address bit 0 */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT0_MASK 0x0000001F +#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT0_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_RESERVED_MASK 0x000000E0 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_RESERVED_SHIFT 5 +/* Selects the HIF address bit used as rank address bit 1 */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT1_MASK 0x00001F00 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT1_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_RESERVED1_MASK 0xFFFFE000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_RESERVED1_SHIFT 13 + +/**** ADDRMAP1 register ****/ +/* Selects the HIF address bits used as bank address bit 0 */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B0_MASK 0x0000000F +#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B0_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_RESERVED_MASK 0x000000F0 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_RESERVED_SHIFT 4 +/* Selects the HIF address bits used as bank 
address bit 1 */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B1_MASK 0x00000F00 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B1_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_RESERVED1_MASK 0x0000F000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_RESERVED1_SHIFT 12 +/* Selects the HIF address bit used as bank address bit 2 */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B2_MASK 0x000F0000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B2_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_RESERVED2_MASK 0xFFF00000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_RESERVED2_SHIFT 20 + +/**** ADDRMAP2 register ****/ +/* Full bus width mode: Selects the HIF address bit used as colu ... */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B2_MASK 0x0000000F +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B2_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED_MASK 0x000000F0 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED_SHIFT 4 +/* Full bus width mode: Selects the HIF address bit used as colu ... */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B3_MASK 0x00000F00 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B3_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED1_MASK 0x0000F000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED1_SHIFT 12 +/* Full bus width mode: Selects the HIF address bit used as colu ... */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B4_MASK 0x000F0000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B4_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED2_MASK 0x00F00000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED2_SHIFT 20 +/* Full bus width mode: Selects the HIF address bit used as colu ... */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B5_MASK 0x0F000000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B5_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED3_MASK 0xF0000000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED3_SHIFT 28 + +/**** ADDRMAP3 register ****/ +/* Full bus width mode: Selects the HIF address bit used as colu ... */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B6_MASK 0x0000000F +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B6_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED_MASK 0x000000F0 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED_SHIFT 4 +/* Full bus width mode: Selects the HIF address bit used as colu ... */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B7_MASK 0x00000F00 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B7_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED1_MASK 0x0000F000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED1_SHIFT 12 +/* Full bus width mode: Selects the HIF address bit used as colu ... */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B8_MASK 0x000F0000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B8_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED2_MASK 0x00F00000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED2_SHIFT 20 +/* Full bus width mode: Selects the HIF address bit used as colu ... */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B9_MASK 0x0F000000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B9_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED3_MASK 0xF0000000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED3_SHIFT 28 + +/**** ADDRMAP4 register ****/ +/* Full bus width mode: Selects the HIF address bit used as colu ... 
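*/
+
+/*
+ * Illustrative aside, not part of the original header: each ADDRMAP field
+ * holds the distance between the chosen HIF address bit and that field's
+ * fixed internal base (the AL_DDR_ADDR_MAP_*_BASE values above), and the
+ * *_DISABLED encoding switches the bit off. A hedged sketch for routing
+ * HIF bit hif_bit to SDRAM column bit 2 (variable names are assumptions):
+ *
+ *	uint32_t col_b2 = enabled ?
+ *		(hif_bit - AL_DDR_ADDR_MAP_COL_2_BASE) :
+ *		AL_DDR_ADDR_MAP_COL_DISABLED;
+ *
+ *	addrmap2 &= ~DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B2_MASK;
+ *	addrmap2 |= (col_b2 << DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B2_SHIFT)
+ *		& DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B2_MASK;
+ */
+
+/*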
*/ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B10_MASK 0x0000000F +#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B10_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_RESERVED_MASK 0x000000F0 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_RESERVED_SHIFT 4 +/* Full bus width mode: Selects the HIF address bit used as colu ... */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B11_MASK 0x00000F00 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B11_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_RESERVED1_MASK 0xFFFFF000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_RESERVED1_SHIFT 12 + +/**** ADDRMAP5 register ****/ +/* Selects the HIF address bits used as row address bit 0 */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B0_MASK 0x0000000F +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B0_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED_MASK 0x000000F0 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED_SHIFT 4 +/* Selects the HIF address bits used as row address bit 1 */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B1_MASK 0x00000F00 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B1_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED1_MASK 0x0000F000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED1_SHIFT 12 +/* Selects the HIF address bits used as row address bits 2 to 10 */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B2_10_MASK 0x000F0000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B2_10_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED2_MASK 0x00F00000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED2_SHIFT 20 +/* Selects the HIF address bit used as row address bit 11 */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B11_MASK 0x0F000000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B11_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED3_MASK 0xF0000000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED3_SHIFT 28 + +/**** ADDRMAP6 register ****/ +/* Selects the HIF address bit used as row address bit 12 */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B12_MASK 0x0000000F +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B12_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED_MASK 0x000000F0 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED_SHIFT 4 +/* Selects the HIF address bit used as row address bit 13 */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B13_MASK 0x00000F00 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B13_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED1_MASK 0x0000F000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED1_SHIFT 12 +/* Selects the HIF address bit used as row address bit 14 */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B14_MASK 0x000F0000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B14_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED2_MASK 0x00F00000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED2_SHIFT 20 +/* Selects the HIF address bit used as row address bit 15 */ +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B15_MASK 0x0F000000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B15_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED3_MASK 0xF0000000 +#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED3_SHIFT 28 + +/**** ODTCFG register ****/ +/* Controls blocking of commands for ODT - 00 - Block read/write ... */ +#define DWC_DDR_UMCTL2_REGS_ODTCFG_WR_ODT_BLOCK_MASK 0x00000003 +#define DWC_DDR_UMCTL2_REGS_ODTCFG_WR_ODT_BLOCK_SHIFT 0 + +/* The delay, in clock cycles, from issuing a read command to ... 
*/ +#define DWC_DDR_UMCTL2_REGS_ODTCFG_RD_ODT_DELAY_MASK 0x0000003C +#define DWC_DDR_UMCTL2_REGS_ODTCFG_RD_ODT_DELAY_SHIFT 2 + +#define DWC_DDR_UMCTL2_REGS_ODTCFG_RESERVED_MASK 0x000000C0 +#define DWC_DDR_UMCTL2_REGS_ODTCFG_RESERVED_SHIFT 6 + +/* Cycles to hold ODT for a read command */ +#define DWC_DDR_UMCTL2_REGS_ODTCFG_RD_ODT_HOLD_MASK 0x00000F00 +#define DWC_DDR_UMCTL2_REGS_ODTCFG_RD_ODT_HOLD_SHIFT 8 + +/* The delay, in clock cycles, from issuing a write command to ... */ +#define DWC_DDR_UMCTL2_REGS_ODTCFG_WR_ODT_DELAY_MASK 0x000F0000 +#define DWC_DDR_UMCTL2_REGS_ODTCFG_WR_ODT_DELAY_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_ODTCFG_RESERVED1_MASK 0x00F00000 +#define DWC_DDR_UMCTL2_REGS_ODTCFG_RESERVED1_SHIFT 20 +/* Cycles to hold ODT for a write command */ +#define DWC_DDR_UMCTL2_REGS_ODTCFG_WR_ODT_HOLD_MASK 0x0F000000 +#define DWC_DDR_UMCTL2_REGS_ODTCFG_WR_ODT_HOLD_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_ODTCFG_RESERVED2_MASK 0xF0000000 +#define DWC_DDR_UMCTL2_REGS_ODTCFG_RESERVED2_SHIFT 28 + +/**** ODTMAP register ****/ +/* Indicates which remote ODTs must be turned on during a write ... */ +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK0_WR_ODT_MASK 0x0000000F +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK0_WR_ODT_SHIFT 0 + +/* Indicates which remote ODTs must be turned on during a read ... */ +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK0_RD_ODT_MASK 0x000000F0 +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK0_RD_ODT_SHIFT 4 + +/* Indicates which remote ODTs must be turned on during a write ... */ +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK1_WR_ODT_MASK 0x00000F00 +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK1_WR_ODT_SHIFT 8 + +/* Indicates which remote ODTs must be turned on during a read ... */ +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK1_RD_ODT_MASK 0x0000F000 +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK1_RD_ODT_SHIFT 12 + +/* Indicates which remote ODTs must be turned on during a write ... */ +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK2_WR_ODT_MASK 0x000F0000 +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK2_WR_ODT_SHIFT 16 + +/* Indicates which remote ODTs must be turned on during a read ... */ +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK2_RD_ODT_MASK 0x00F00000 +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK2_RD_ODT_SHIFT 20 + +/* Indicates which remote ODTs must be turned on during a write ... */ +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK3_WR_ODT_MASK 0x0F000000 +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK3_WR_ODT_SHIFT 24 + +/* Indicates which remote ODTs must be turned on during a read ... */ +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK3_RD_ODT_MASK 0xF0000000 +#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK3_RD_ODT_SHIFT 28 + +/**** SCHED register ****/ +/* Active low signal */ +#define DWC_DDR_UMCTL2_REGS_SCHED_FORCE_LOW_PRI_N (1 << 0) +/* If set then the bank selector prefers writes over reads */ +#define DWC_DDR_UMCTL2_REGS_SCHED_PREFER_WRITE (1 << 1) +/* If true, bank is closed until transactions are available for ... */ +#define DWC_DDR_UMCTL2_REGS_SCHED_PAGECLOSE (1 << 2) + +#define DWC_DDR_UMCTL2_REGS_SCHED_RESERVED_MASK 0x000000F8 +#define DWC_DDR_UMCTL2_REGS_SCHED_RESERVED_SHIFT 3 +/* Number of entries in the low priority transaction store is th ... */ +#define DWC_DDR_UMCTL2_REGS_SCHED_LPR_NUM_ENTRIES_MASK 0x00001F00 +#define DWC_DDR_UMCTL2_REGS_SCHED_LPR_NUM_ENTRIES_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_SCHED_RESERVED1_MASK 0x0000E000 +#define DWC_DDR_UMCTL2_REGS_SCHED_RESERVED1_SHIFT 13 +/* Describes the number of cycles that co_gs_go2critical_rd or c ... 
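*/
+
+/*
+ * Illustrative aside, not part of the original header: each nibble of the
+ * ODTMAP register above is a bitmask of ranks (bit 0 = rank 0 ... bit 3 =
+ * rank 3). A hedged sketch of the common "assert ODT only on the rank
+ * being written" setting for a two-rank system, with reads left without
+ * ODT:
+ *
+ *	uint32_t odtmap =
+ *		(0x1 << DWC_DDR_UMCTL2_REGS_ODTMAP_RANK0_WR_ODT_SHIFT) |
+ *		(0x2 << DWC_DDR_UMCTL2_REGS_ODTMAP_RANK1_WR_ODT_SHIFT);
+ */
+
+/*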
*/ +#define DWC_DDR_UMCTL2_REGS_SCHED_GO2CRITICAL_HYSTERESIS_MASK 0x00FF0000 +#define DWC_DDR_UMCTL2_REGS_SCHED_GO2CRITICAL_HYSTERESIS_SHIFT 16 +/* When the preferred transaction store is empty for these many ... */ +#define DWC_DDR_UMCTL2_REGS_SCHED_RDWR_IDLE_GAP_MASK 0x7F000000 +#define DWC_DDR_UMCTL2_REGS_SCHED_RDWR_IDLE_GAP_SHIFT 24 + +#define DWC_DDR_UMCTL2_REGS_SCHED_RESERVED2 (1 << 31) + +/**** PERFHPR0 register ****/ +/* Number of clocks that the HPR queue is guaranteed to stay in ... */ +#define DWC_DDR_UMCTL2_REGS_PERFHPR0_HPR_MIN_NON_CRITICAL_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_PERFHPR0_HPR_MIN_NON_CRITICAL_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_PERFHPR0_RESERVED_MASK 0xFFFF0000 +#define DWC_DDR_UMCTL2_REGS_PERFHPR0_RESERVED_SHIFT 16 + +/**** PERFHPR1 register ****/ +/* Number of clocks that the HPR queue can be starved before it ... */ +#define DWC_DDR_UMCTL2_REGS_PERFHPR1_HPR_MAX_STARVE_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_PERFHPR1_HPR_MAX_STARVE_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_PERFHPR1_RESERVED_MASK 0x00FF0000 +#define DWC_DDR_UMCTL2_REGS_PERFHPR1_RESERVED_SHIFT 16 +/* Number of transactions that are serviced once the HPR queue g ... */ +#define DWC_DDR_UMCTL2_REGS_PERFHPR1_HPR_XACT_RUN_LENGTH_MASK 0xFF000000 +#define DWC_DDR_UMCTL2_REGS_PERFHPR1_HPR_XACT_RUN_LENGTH_SHIFT 24 + +/**** PERFLPR0 register ****/ +/* Number of clocks that the LPR queue is guaranteed to be non-c ... */ +#define DWC_DDR_UMCTL2_REGS_PERFLPR0_LPR_MIN_NON_CRITICAL_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_PERFLPR0_LPR_MIN_NON_CRITICAL_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_PERFLPR0_RESERVED_MASK 0xFFFF0000 +#define DWC_DDR_UMCTL2_REGS_PERFLPR0_RESERVED_SHIFT 16 + +/**** PERFLPR1 register ****/ +/* Number of clocks that the LPR queue can be starved before it ... */ +#define DWC_DDR_UMCTL2_REGS_PERFLPR1_LPR_MAX_STARVE_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_PERFLPR1_LPR_MAX_STARVE_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_PERFLPR1_RESERVED_MASK 0x00FF0000 +#define DWC_DDR_UMCTL2_REGS_PERFLPR1_RESERVED_SHIFT 16 +/* Number of transactions that are serviced once the LPR queue g ... */ +#define DWC_DDR_UMCTL2_REGS_PERFLPR1_LPR_XACT_RUN_LENGTH_MASK 0xFF000000 +#define DWC_DDR_UMCTL2_REGS_PERFLPR1_LPR_XACT_RUN_LENGTH_SHIFT 24 + +/**** PERFWR0 register ****/ +/* Number of clocks that the write queue is guaranteed to be non ... */ +#define DWC_DDR_UMCTL2_REGS_PERFWR0_W_MIN_NON_CRITICAL_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_PERFWR0_W_MIN_NON_CRITICAL_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_PERFWR0_RESERVED_MASK 0xFFFF0000 +#define DWC_DDR_UMCTL2_REGS_PERFWR0_RESERVED_SHIFT 16 + +/**** PERFWR1 register ****/ +/* Number of clocks that the write queue can be starved before i ... */ +#define DWC_DDR_UMCTL2_REGS_PERFWR1_W_MAX_STARVE_MASK 0x0000FFFF +#define DWC_DDR_UMCTL2_REGS_PERFWR1_W_MAX_STARVE_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_PERFWR1_RESERVED_MASK 0x00FF0000 +#define DWC_DDR_UMCTL2_REGS_PERFWR1_RESERVED_SHIFT 16 +/* Number of transactions that are serviced once the WR queue go ... */ +#define DWC_DDR_UMCTL2_REGS_PERFWR1_W_XACT_RUN_LENGTH_MASK 0xFF000000 +#define DWC_DDR_UMCTL2_REGS_PERFWR1_W_XACT_RUN_LENGTH_SHIFT 24 + +/**** DBG0 register ****/ +/* When 1, disable write combine.
FOR DEBUG ONLY */ +#define DWC_DDR_UMCTL2_REGS_DBG0_DIS_WC (1 << 0) +/* Only present in designs supporting read bypass */ +#define DWC_DDR_UMCTL2_REGS_DBG0_DIS_RD_BYPASS (1 << 1) +/* Only present in designs supporting activate bypass */ +#define DWC_DDR_UMCTL2_REGS_DBG0_DIS_ACT_BYPASS (1 << 2) + +#define DWC_DDR_UMCTL2_REGS_DBG0_RESERVED (1 << 3) +/* When this is set to '0', auto-precharge is disabled for the f ... */ +#define DWC_DDR_UMCTL2_REGS_DBG0_DIS_COLLISION_PAGE_OPT (1 << 4) + +#define DWC_DDR_UMCTL2_REGS_DBG0_RESERVED1_MASK 0xFFFFFFE0 +#define DWC_DDR_UMCTL2_REGS_DBG0_RESERVED1_SHIFT 5 + +/**** DBG1 register ****/ +/* When 1, uMCTL2 will not de-queue any transactions from the CA ... */ +#define DWC_DDR_UMCTL2_REGS_DBG1_DIS_DQ (1 << 0) + +#define DWC_DDR_UMCTL2_REGS_DBG1_RESERVED_MASK 0xFFFFFFFE +#define DWC_DDR_UMCTL2_REGS_DBG1_RESERVED_SHIFT 1 + +/**** DBGCAM register ****/ +/* High priority read queue depth
FOR DEBUG ONLY */ +#define DWC_DDR_UMCTL2_REGS_DBGCAM_DBG_HPR_Q_DEPTH_MASK 0x0000003F +#define DWC_DDR_UMCTL2_REGS_DBGCAM_DBG_HPR_Q_DEPTH_SHIFT 0 + +#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED_MASK 0x000000C0 +#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED_SHIFT 6 +/* Low priority read queue depth
FOR DEBUG ONLY */ +#define DWC_DDR_UMCTL2_REGS_DBGCAM_DBG_LPR_Q_DEPTH_MASK 0x00003F00 +#define DWC_DDR_UMCTL2_REGS_DBGCAM_DBG_LPR_Q_DEPTH_SHIFT 8 + +#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED1_MASK 0x0000C000 +#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED1_SHIFT 14 +/* Write queue depth
FOR DEBUG ONLY */ +#define DWC_DDR_UMCTL2_REGS_DBGCAM_DBG_W_Q_DEPTH_MASK 0x003F0000 +#define DWC_DDR_UMCTL2_REGS_DBGCAM_DBG_W_Q_DEPTH_SHIFT 16 + +#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED2_MASK 0x00C00000 +#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED2_SHIFT 22 +/* Stall
FOR DEBUG ONLY */ +#define DWC_DDR_UMCTL2_REGS_DBGCAM_DBG_STALL (1 << 24) + +#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED3_MASK 0xFE000000 +#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED3_SHIFT 25 + +/**** PCCFG register ****/ +/* If set to 1 (enabled), sets co_gs_go2critical_wr and co_gs_go ... */ +#define DWC_DDR_UMCTL2_MP_PCCFG_GO2CRITICAL_EN (1 << 0) + +#define DWC_DDR_UMCTL2_MP_PCCFG_RESERVED_MASK 0x0000000E +#define DWC_DDR_UMCTL2_MP_PCCFG_RESERVED_SHIFT 1 +/* Page match four limit */ +#define DWC_DDR_UMCTL2_MP_PCCFG_PAGEMATCH_LIMIT (1 << 4) + +#define DWC_DDR_UMCTL2_MP_PCCFG_RESERVED1_MASK 0xFFFFFFE0 +#define DWC_DDR_UMCTL2_MP_PCCFG_RESERVED1_SHIFT 5 + +/**** PCFGR_0 register ****/ +/* Determines the initial load value of read aging counters */ +#define DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_PRIORITY_MASK 0x000003FF +#define DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_PRIORITY_SHIFT 0 + +#define DWC_DDR_UMCTL2_MP_PCFGR_0_RESERVED (1 << 10) +/* If set to 1, read transactions with ID not covered by any of ... */ +#define DWC_DDR_UMCTL2_MP_PCFGR_0_READ_REORDER_BYPASS_EN (1 << 11) +/* If set to 1, enables aging function for the read channel of t ... */ +#define DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_AGING_EN (1 << 12) +/* If set to 1, enables the AXI urgent sideband signal (arurgent ... */ +#define DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_URGENT_EN (1 << 13) +/* If set to 1, enables the Page Match feature */ +#define DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_PAGEMATCH_EN (1 << 14) +/* If set to 1, enables reads to be generated as "High Priority ... */ +#define DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_HPR_EN (1 << 15) + +#define DWC_DDR_UMCTL2_MP_PCFGR_0_RESERVED1_MASK 0xFFFF0000 +#define DWC_DDR_UMCTL2_MP_PCFGR_0_RESERVED1_SHIFT 16 + +/**** PCFGW_0 register ****/ +/* Determines the initial load value of write aging counters */ +#define DWC_DDR_UMCTL2_MP_PCFGW_0_WR_PORT_PRIORITY_MASK 0x000003FF +#define DWC_DDR_UMCTL2_MP_PCFGW_0_WR_PORT_PRIORITY_SHIFT 0 + +#define DWC_DDR_UMCTL2_MP_PCFGW_0_RESERVED_MASK 0x00000C00 +#define DWC_DDR_UMCTL2_MP_PCFGW_0_RESERVED_SHIFT 10 +/* If set to 1, enables aging function for the write channel of ... */ +#define DWC_DDR_UMCTL2_MP_PCFGW_0_WR_PORT_AGING_EN (1 << 12) +/* If set to 1, enables the AXI urgent sideband signal (awurgent ... */ +#define DWC_DDR_UMCTL2_MP_PCFGW_0_WR_PORT_URGENT_EN (1 << 13) +/* If set to 1, enables the Page Match feature */ +#define DWC_DDR_UMCTL2_MP_PCFGW_0_WR_PORT_PAGEMATCH_EN (1 << 14) + +#define DWC_DDR_UMCTL2_MP_PCFGW_0_RESERVED1_MASK 0xFFFF8000 +#define DWC_DDR_UMCTL2_MP_PCFGW_0_RESERVED1_SHIFT 15 + +/**** PCFGIDMASKCH0_0 register ****/ +/* Determines the mask used in the ID mapping function for virtu ... */ +#define DWC_DDR_UMCTL2_MP_PCFGIDMASKCH0_0_ID_MASK_MASK 0x003FFFFF +#define DWC_DDR_UMCTL2_MP_PCFGIDMASKCH0_0_ID_MASK_SHIFT 0 + +#define DWC_DDR_UMCTL2_MP_PCFGIDMASKCH0_0_RESERVED_MASK 0xFFC00000 +#define DWC_DDR_UMCTL2_MP_PCFGIDMASKCH0_0_RESERVED_SHIFT 22 + +/**** PCFGIDVALUECH0_0 register ****/ +/* Determines the value used in the ID mapping function for virt ... 
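*/
+
+/*
+ * Illustrative aside, not part of the original header: a read-port
+ * configuration word is composed from the aging-counter priority field
+ * plus the enable bits above. A minimal hedged sketch (prio is an assumed
+ * input):
+ *
+ *	uint32_t pcfgr0 =
+ *		((prio << DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_PRIORITY_SHIFT) &
+ *		 DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_PRIORITY_MASK) |
+ *		DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_AGING_EN |
+ *		DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_URGENT_EN;
+ */
+
+/*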
*/ +#define DWC_DDR_UMCTL2_MP_PCFGIDVALUECH0_0_ID_VALUE_MASK 0x003FFFFF +#define DWC_DDR_UMCTL2_MP_PCFGIDVALUECH0_0_ID_VALUE_SHIFT 0 + +#define DWC_DDR_UMCTL2_MP_PCFGIDVALUECH0_0_RESERVED_MASK 0xFFC00000 +#define DWC_DDR_UMCTL2_MP_PCFGIDVALUECH0_0_RESERVED_SHIFT 22 + +#ifdef __cplusplus +} +#endif + +#endif + +/** @} end of DDR group */ diff --git a/target/linux/alpine/files/drivers/edac/al/al_hal_ddr_phy_regs.h b/target/linux/alpine/files/drivers/edac/al/al_hal_ddr_phy_regs.h new file mode 100644 index 00000000000000..43367ca2b0ba07 --- /dev/null +++ b/target/linux/alpine/files/drivers/edac/al/al_hal_ddr_phy_regs.h @@ -0,0 +1,1148 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*******************************************************************************/ + +/** + * @addtogroup groupddr + * + * @{ + * @file al_hal_ddr_phy_regs.h + * + * @brief DDR PHY registers + * + */ +#ifndef __AL_HAL_DDR_PHY_REGS_REGS_H__ +#define __AL_HAL_DDR_PHY_REGS_REGS_H__ + +#include "al_hal_ddr_cfg.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* The index of the ECC byte lane */ +#define AL_DDR_PHY_ECC_BYTE_LANE_INDEX 4 + +/* The clock frequency at which the PLL frequency select needs to be changed */ +#define AL_DDR_PHY_PLL_FREQ_SEL_MHZ 700 + +#define AL_DDR_PHY_NUM_ZQ_SEGMANTS 3 + +struct al_ddr_phy_zq_regs { + uint32_t PR; /* ZQ Impedance Control Program Register */ + uint32_t DR; /* ZQ Impedance Control Data Register */ + uint32_t SR; /* ZQ Impedance Control Status Register */ + uint32_t reserved; +}; + +struct al_ddr_phy_datx8_regs { + uint32_t GCR[4]; /* General Configuration Registers 0-3 */ + uint32_t GSR[3]; /* General Status Registers 0-2 */ + uint32_t BDLR[7]; /* Bit Delay Line Registers 0-6 */ + uint32_t LCDLR[3]; /* Local Calibrated Delay Line Registers 0-2 */ + uint32_t MDLR; /* Master Delay Line Register */ + uint32_t GTR; /* General Timing Register */ + uint32_t reserved[13]; +}; + +struct al_ddr_phy_regs { + uint32_t RIDR; /* Revision Identification Reg */ + uint32_t PIR; /* PHY Initialization Reg */ + uint32_t PGCR[4]; /* PHY General Configuration Regs 0-3 */ + uint32_t PGSR[2]; /* PHY General Status Regs 0-1 */ + uint32_t PLLCR; /* PLL Control Reg */ + uint32_t PTR[5]; /* PHY Timing Regs 0-4 */ + uint32_t ACMDLR; /* AC Master Delay Line Reg */ + uint32_t ACLCDLR; /* AC Local Calibrated Delay Line Reg */ + uint32_t ACBDLR[10]; /* AC Bit Delay Line Regs 0-9 */ + uint32_t ACIOCR[6]; /* AC I/O Configuration Regs 0-5 */ + uint32_t DXCCR; /* DATX8 Common Configuration Reg */ + uint32_t DSGCR; /* DDR System General Configuration Reg */ + uint32_t DCR; /* DRAM Configuration Reg */ + uint32_t DTPR[4]; /* DRAM Timing Parameters Registers 0-3 */ + uint32_t MR[4]; /* Mode Regs 0-3 */ + uint32_t ODTCR; /* ODT Configuration Reg */ + uint32_t DTCR; /* Data Training Configuration Reg */ + uint32_t DTAR[4]; /* Data Training Address Register 0-3 */ + uint32_t DTDR[2]; /* Data Training Data Register 0-1 */ + uint32_t DTEDR[2]; /* Data Training Eye Data Register 0-1 */ + uint32_t RDIMMGCR[2]; /* RDIMM General Configuration Register 0-1 */ + uint32_t RDIMMCR[2]; /* RDIMM Control Register 0-1 */ + uint32_t reserved1[0x3D - 0x39]; + uint32_t ODTCTLR; /* ODT Control Reg */ + uint32_t reserved2[0x70 - 0x3E]; + uint32_t BISTRR; /* BIST Run Register */ + uint32_t BISTWCR; /* BIST Word Count Register */ + uint32_t BISTMSKR[3]; /* BIST Mask Register 0-2 */ + uint32_t BISTLSR; /* BIST LFSR Seed Register */ + uint32_t BISTAR[3]; /* BIST Address Register 0-2 */ + uint32_t BISTUDPR; /* BIST User Data Pattern Register */ + uint32_t BISTGSR; /* BIST General Status Register */ + uint32_t BISTWER; /* BIST Word Error Register */ + uint32_t BISTBER[4]; /* BIST Bit Error Register 0-3 */ + uint32_t BISTWCSR; /* BIST Word Count Status Register */ + uint32_t BISTFWR[3]; /* BIST Fail Word Register 0-2 */ + uint32_t reserved3[0x8E - 0x84]; + uint32_t IOVCR[2]; /* IO VREF Control Register 0-1 */ + uint32_t ZQCR; /* ZQ Impedance Control Register */ + struct al_ddr_phy_zq_regs ZQ[AL_DDR_PHY_NUM_ZQ_SEGMANTS]; + uint32_t reserved4[0xA0 - 0x9D]; + struct al_ddr_phy_datx8_regs DATX8[AL_DDR_PHY_NUM_BYTE_LANES]; +}; + +/* Register PGSR0 field iDONE */ +/** + * Initialization Done: Indicates if set that the DDR
system initialization has + * completed. This bit is set after all the selected initialization routines in + * PIR register have completed. + */ +#define DWC_DDR_PHY_REGS_PGSR0_IDONE 0x00000001 + +/* Register PGSR0 field ZCERR */ +/** + * Impedance Calibration Error: Indicates if set that there is an error in + * impedance calibration. + */ +#define DWC_DDR_PHY_REGS_PGSR0_ZCERR 0x00100000 + +/* Register PGSR0 field WLERR */ +/** + * Write Leveling Error: Indicates if set that there is an error in write + * leveling. + */ +#define DWC_DDR_PHY_REGS_PGSR0_WLERR 0x00200000 + +/* Register PGSR0 field QSGERR */ +/** + * DQS Gate Training Error: Indicates if set that there is an error in DQS gate + * training. + */ +#define DWC_DDR_PHY_REGS_PGSR0_QSGERR 0x00400000 + +/* Register PGSR0 field WLAERR */ +/** + * Write Leveling Adjustment Error: Indicates if set that there is an error in + * write leveling adjustment. + */ +#define DWC_DDR_PHY_REGS_PGSR0_WLAERR 0x00800000 + +/* Register PGSR0 field RDERR */ +/** + * Read Bit Deskew Error: Indicates if set that there is an error in read bit + * deskew. + */ +#define DWC_DDR_PHY_REGS_PGSR0_RDERR 0x01000000 + +/* Register PGSR0 field WDERR */ +/** + * Write Bit Deskew Error: Indicates if set that there is an error in write bit + * deskew. + */ +#define DWC_DDR_PHY_REGS_PGSR0_WDERR 0x02000000 + +/* Register PGSR0 field REERR */ +/** + * Read Eye Training Error: Indicates if set that there is an error in read eye + * training. + */ +#define DWC_DDR_PHY_REGS_PGSR0_REERR 0x04000000 + +/* Register PGSR0 field WEERR */ +/** + * Write Eye Training Error: Indicates if set that there is an error in write + * eye training. + */ +#define DWC_DDR_PHY_REGS_PGSR0_WEERR 0x08000000 + +/* Register PGSR0 field VTDONE */ +/** + * AC VT Done: Indicates if set that VT compensation calculation has + * been completed for all enabled AC BDLs and LCDL. + */ +#define DWC_DDR_PHY_REGS_PGSR0_VTDONE 0x40000000 + +/* Register PGSR1 field VTSTOP */ +/** + * VT Stop: Indicates if set that the VT calculation logic has stopped + * computing the next values for the VT compensated delay line values. After + * assertion of the PGCR.INHVT, the VTSTOP bit should be read to ensure all VT + * compensation logic has stopped computations before writing to the delay line + * registers. + */ +#define DWC_DDR_PHY_REGS_PGSR1_VTSTOP 0x40000000 + +/* Register PGCR0 field PHYFRST */ +/** + * A write of ‘0’ to this bit resets the AC and DATX8 FIFOs without + * resetting PUB RTL logic. This bit is not self-clearing and a ‘1’ + * must be written to deassert the reset. + */ +#define DWC_DDR_PHY_REGS_PGCR0_PHYFRST 0x04000000 + +/* Register PGCR1 field DLBYPMODE */ +/** + * Controls DDL Bypass Modes. Valid values are: + * 00 = Normal dynamic control + * 01 = All DDLs bypassed + * 10 = No DDLs bypassed + * 11 = Reserved + */ +#define DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_MASK 0x00000030 +#define DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_SHIFT 4 + +#define DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_DYNAMIC \ + (0 << DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_SHIFT) + +#define DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_BYPASS \ + (1 << DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_SHIFT) + +#define DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_NO_BYPASS \ + (2 << DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_SHIFT) + +/* Register PGCR1 field IODDRM */ +/** + * I/O DDR Mode (D3F I/O Only): Selects the DDR mode for the I/Os. + * These bits connect to bits [2:1] of the IOM pin of the SSTL I/O. 
+ * I/O Mode: I/O Mode select + * 000 = DDR2 mode + * 001 = LVCMOS mode + * 010 = DDR3 mode + * 011 = Reserved + * 100 = DDR3L mode + * 101 = Reserved + * 110 = Reserved + * 111 = Reserved + */ +#define DWC_DDR_PHY_REGS_PGCR1_IODDRM_MASK AL_FIELD_MASK(8, 7) +#define DWC_DDR_PHY_REGS_PGCR1_IODDRM_SHIFT 7 +#define DWC_DDR_PHY_REGS_PGCR1_IODDRM_VAL_DDR3 \ + (1 << DWC_DDR_PHY_REGS_PGCR1_IODDRM_SHIFT) + +/* Register PGCR1 field PHYHRST */ +/** + * PHY High-Speed Reset: A write of ‘0’ to this bit resets the AC and DATX8 + * macros without resetting PUBm2 RTL logic. This bit is not self-clearing + * and a ‘1’ must be written to de-assert the reset. + */ +#define DWC_DDR_PHY_REGS_PGCR1_PHYHRST 0x02000000 + +/* Register PGCR1 field INHVT */ +/** + * VT Calculation Inhibit: Inhibits calculation of the next VT compensated + * delay line values. A value of 1 will initiate a stop of the VT compensation + * logic. The bit PGSR1[30] (VTSTOP) will be set to a logic 1 when VT + * compensation has stopped. This bit should be set to 1 during writes to the + * delay line registers. A value of 0 will re-enable the VT compensation + * logic. + */ +#define DWC_DDR_PHY_REGS_PGCR1_INHVT 0x04000000 + +/* Register PGCR1 field IOLB */ +/** + * I/O Loop-Back Select: Selects where inside the I/O the loop-back of signals + * happens. Valid values are: + * 0 = Loopback is after output buffer; output enable must be asserted + * 1 = Loopback is before output buffer; output enable is don’t care + */ +#define DWC_DDR_PHY_REGS_PGCR1_IOLB 0x08000000 + +/* Register PGCR3 field RDMODE */ +/** + */ +#define DWC_DDR_PHY_REGS_PGCR3_RDMODE_MASK 0x00000018 +#define DWC_DDR_PHY_REGS_PGCR3_RDMODE_SHIFT 3 + +#define DWC_DDR_PHY_REGS_PGCR3_RDMODE_STATIC_RD_RSP_OFF \ + (0 << DWC_DDR_PHY_REGS_PGCR3_RDMODE_SHIFT) + +#define DWC_DDR_PHY_REGS_PGCR3_RDMODE_STATIC_RD_RSP_CMP \ + (2 << DWC_DDR_PHY_REGS_PGCR3_RDMODE_SHIFT) + +#define DWC_DDR_PHY_REGS_PGCR3_RDMODE_STATIC_RD_RSP_PRG \ + (3 << DWC_DDR_PHY_REGS_PGCR3_RDMODE_SHIFT) + +/* Register PGCR3 field RDDLY */ +/** + */ +#define DWC_DDR_PHY_REGS_PGCR3_RDDLY_MASK 0x000001e0 +#define DWC_DDR_PHY_REGS_PGCR3_RDDLY_SHIFT 5 + +/* Register PGCR3 field GATEDXCTLCLK */ +/** +Enable Clock Gating for DX ctl_clk: Enables, when set, clock gating for power +saving. Valid values are: +0 = Clock gating is disabled. +1 = Clock gating is enabled + */ +#define DWC_DDR_PHY_REGS_PGCR3_GATEDXCTLCLK 0x00002000 + +/* Register PGCR3 field GATEDXDDRCLK */ +/** +Enable Clock Gating for DX ddr_clk: Enables, when set, clock gating for power +saving. Valid values are: +0 = Clock gating is disabled. +1 = Clock gating is enabled + */ +#define DWC_DDR_PHY_REGS_PGCR3_GATEDXDDRCLK 0x00004000 + +/* Register PGCR3 field GATEDXRDCLK */ +/** +Enable Clock Gating for DX rd_clk: Enables, when set, clock gating for power +saving. Valid values are: +0 = Clock gating is disabled. +1 = Clock gating is enabled + */ +#define DWC_DDR_PHY_REGS_PGCR3_GATEDXRDCLK 0x00008000 + +/* Register PLLCR field FRQSEL */ +/** + * PLL Frequency Select: Selects the operating range of the PLL.
+ * 00 = PLL reference clock (ctl_clk/REF_CLK) ranges from 335MHz to 533MHz + * 01 = PLL reference clock (ctl_clk/REF_CLK) ranges from 225MHz to 385MHz + * 10 = Reserved + * 11 = PLL reference clock (ctl_clk/REF_CLK) ranges from 166MHz to 275MHz + */ +#define DWC_DDR_PHY_REGS_PLLCR_FRQSEL_MASK 0x00180000 +#define DWC_DDR_PHY_REGS_PLLCR_FRQSEL_SHIFT 19 +#define DWC_DDR_PHY_REGS_PLLCR_FRQSEL_335MHZ_533MHz \ + (0x0 << DWC_DDR_PHY_REGS_PLLCR_FRQSEL_SHIFT) +#define DWC_DDR_PHY_REGS_PLLCR_FRQSEL_225MHZ_385MHz \ + (0x1 << DWC_DDR_PHY_REGS_PLLCR_FRQSEL_SHIFT) +#define DWC_DDR_PHY_REGS_PLLCR_FRQSEL_166MHZ_275MHz \ + (0x3 << DWC_DDR_PHY_REGS_PLLCR_FRQSEL_SHIFT) + +/* Register ACIOCR0 field ACPDR */ +/** + * AC Power Down Receiver: Powers down, when set, the input receiver on the I/O for + * RAS#, CAS#, WE#, BA[2:0], and A[15:0] pins. + */ +#define DWC_DDR_PHY_REGS_ACIOCR0_ACPDR 0x00000010 + +/* Register ACIOCR0 field CKPDR */ +/** + * CK Power Down Receiver: Powers down, when set, the input receiver on the I/O for + * CK[0], CK[1], CK[2], and CK[3] pins, respectively. + */ +#define DWC_DDR_PHY_REGS_ACIOCR0_CKPDR_MASK 0x00003c00 +#define DWC_DDR_PHY_REGS_ACIOCR0_CKPDR_SHIFT 10 + +/* Register ACIOCR0 field RANKPDR */ +/** + * Rank Power Down Receiver: Powers down, when set, the input receiver on the I/O + * CKE[3:0], ODT[3:0], and CS#[3:0] pins. RANKPDR[0] controls the power down for + * CKE[0], ODT[0], and CS#[0], RANKPDR[1] controls the power down for CKE[1], + * ODT[1], and CS#[1], and so on. + */ +#define DWC_DDR_PHY_REGS_ACIOCR0_RANKPDR_MASK 0x03c00000 +#define DWC_DDR_PHY_REGS_ACIOCR0_RANKPDR_SHIFT 22 + +/* Register ACIOCR0 field RSTPDR */ +/** + * SDRAM Reset Power Down Receiver: Powers down, when set, the input receiver + * on the I/O for SDRAM RST# pin. + */ +#define DWC_DDR_PHY_REGS_ACIOCR0_RSTPDR 0x10000000 + +/* Register DSGCR field PUREN */ +/** + * PHY Update Request Enable: Specifies if set that the PHY should issue a + * PHY-initiated update request when there is DDL VT drift. + */ +#define DWC_DDR_PHY_REGS_DSGCR_PUREN 0x00000001 + + +/* Register DSGCR field DQSGX */ +/** + * DQS Gate Extension: Specifies if set that the read DQS gate will be + * extended and then re-centered, i.e. one clock cycle extension on either + * side. This should be set ONLY when used with DQS pulldown and DQSn + * pullup. Allowable settings are: + * 00 = do not extend the gate + * 01 = extend the gate by ½ tCK in both directions (but never earlier than + * zero read latency) + * 10 = extend the gate earlier by ½ tCK and later by 2 * tCK (to facilitate + * LPDDR2/LPDDR3 usage without training for systems supporting up to + * 800Mbps) + * 11 = extend the gate earlier by ½ tCK and later by 3 * tCK (to facilitate + * LPDDR2/LPDDR3 usage without training for systems supporting up to + * 1600Mbps) + */ +#define DWC_DDR_PHY_REGS_DSGCR_DQSGX_MASK 0x000000c0 +#define DWC_DDR_PHY_REGS_DSGCR_DQSGX_SHIFT 6 + +/* Register DSGCR field RRMODE */ +/** + * Rise-to-Rise Mode: Indicates if set that the PHY mission mode is configured + * to run in rise-to-rise mode. Otherwise if not set the PHY mission mode is + * running in rise-to-fall mode. + */ +#define DWC_DDR_PHY_REGS_DSGCR_RRMODE 0x00040000 + +/* Register DCR field NOSRA */ +/** + * No Simultaneous Rank Access: Specifies if set that simultaneous rank access + * on the same clock cycle is not allowed. This means that multiple chip select + * signals should not be asserted at the same time. This may be required on + * some DIMM systems. + */
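+
+/*
+ * Illustrative aside, not part of the original header: the INHVT and VTSTOP
+ * descriptions above imply the usual sequence for safely updating delay-line
+ * registers. A hedged sketch, assuming a mapped
+ * struct al_ddr_phy_regs __iomem *phy and the kernel's readl()/writel():
+ *
+ *	// stop VT compensation before touching LCDL/BDL registers
+ *	writel(readl(&phy->PGCR[1]) | DWC_DDR_PHY_REGS_PGCR1_INHVT,
+ *	       &phy->PGCR[1]);
+ *	while (!(readl(&phy->PGSR[1]) & DWC_DDR_PHY_REGS_PGSR1_VTSTOP))
+ *		cpu_relax();
+ *
+ *	// ... update DATX8 LCDL/BDL registers here ...
+ *
+ *	writel(readl(&phy->PGCR[1]) & ~DWC_DDR_PHY_REGS_PGCR1_INHVT,
+ *	       &phy->PGCR[1]);
+ */
+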
+#define DWC_DDR_PHY_REGS_DCR_NOSRA 0x08000000 + +/* Register DCR field DDR2T */ +/** + * DDR 2T Timing: Indicates if set that 2T timing should be used by PUBm2 + * internally generated SDRAM transactions. + */ +#define DWC_DDR_PHY_REGS_DCR_DDR2T 0x10000000 + +/* Register DCR field UDIMM */ +/** + * Un-buffered DIMM Address Mirroring: Indicates if set that there is address + * mirroring on the second rank of an un-buffered DIMM (the rank connected to + * CS#[1]). In this case, the PUBm2 re-scrambles the bank and address when + * sending mode register commands to the second rank. This only applies to + * PUBm2 internal SDRAM transactions. Transactions generated by the controller + * must make their own adjustments when using an un-buffered DIMM. DCR[NOSRA] + * must be set if address mirroring is enabled. + */ +#define DWC_DDR_PHY_REGS_DCR_UDIMM 0x20000000 + +/* Register DTPR0 field t_rtp */ +/* Internal read to precharge command delay. Valid values are 2 to 15 */ +#define DWC_DDR_PHY_REGS_DTPR0_T_RTP_MASK 0x0000000f +#define DWC_DDR_PHY_REGS_DTPR0_T_RTP_SHIFT 0 + +/* Register DTPR0 field t_wtr */ +/* Internal write to read command delay. Valid values are 1 to 15 */ +#define DWC_DDR_PHY_REGS_DTPR0_T_WTR_MASK 0x000000f0 +#define DWC_DDR_PHY_REGS_DTPR0_T_WTR_SHIFT 4 + +/* Register DTPR0 field t_rp */ +/* Precharge command period: The minimum time between a precharge command +and any other command. Note that the Controller automatically derives tRPA for +8-bank DDR2 devices by adding 1 to tRP. Valid values are 2 to 15. +In LPDDR3 mode, PUBm2 adds an offset of 8 to the register value, so valid range +is 8 to 23 */ +#define DWC_DDR_PHY_REGS_DTPR0_T_RP_MASK 0x00000f00 +#define DWC_DDR_PHY_REGS_DTPR0_T_RP_SHIFT 8 + +/* Register DTPR0 field t_rcd */ +/* Activate to read or write delay. Minimum time from when an activate command + * is issued to when a read or write to the activated row can be issued. Valid + * values are 2 to 15. In LPDDR3 mode, PUBm2 adds an offset of 8 to the register + * value, so valid range is 8 to 23. + */ +#define DWC_DDR_PHY_REGS_DTPR0_T_RCD_MASK 0x0000f000 +#define DWC_DDR_PHY_REGS_DTPR0_T_RCD_SHIFT 12 + +/* Register DTPR0 field t_ras_min */ +/* Activate to precharge command delay. Valid values are 2 to 63 */ +#define DWC_DDR_PHY_REGS_DTPR0_T_RAS_MIN_MASK 0x003f0000 +#define DWC_DDR_PHY_REGS_DTPR0_T_RAS_MIN_SHIFT 16 + +/* Register DTPR0 field t_rrd */ +/* Activate to activate command delay (different banks). Valid values are 1 to + * 15 + */ +#define DWC_DDR_PHY_REGS_DTPR0_T_RRD_MASK 0x03c00000 +#define DWC_DDR_PHY_REGS_DTPR0_T_RRD_SHIFT 22 + +/* Register DTPR0 field t_rc */ +/* Activate to activate command delay (same bank). Valid values are 2 to 63 */ +#define DWC_DDR_PHY_REGS_DTPR0_T_RC_MASK 0xfc000000 +#define DWC_DDR_PHY_REGS_DTPR0_T_RC_SHIFT 26 + +/* Register DTPR1 field T_AOND */ +/* Read ODT turn-on delay */ +#define DWC_DDR_PHY_REGS_DTPR1_T_AOND_MASK 0xc0000000 +#define DWC_DDR_PHY_REGS_DTPR1_T_AOND_SHIFT 30 + +/* Register DTPR3 field T_OFDX */ +/* ODT turn-on length (read and write) */ +#define DWC_DDR_PHY_REGS_DTPR3_T_OFDX_MASK 0xe0000000 +#define DWC_DDR_PHY_REGS_DTPR3_T_OFDX_SHIFT 29 + +/* Register ODTCR field RDODT0 */ +/** + * Read ODT: Specifies whether ODT should be enabled (‘1’) or disabled (‘0’) on + * each of the up to four ranks when a read command is sent to rank n. RDODT0, + * RDODT1, RDODT2, and RDODT3 specify ODT settings when a read is to rank 0, + * rank 1, rank 2, and rank 3, respectively.
The four bits of each field each + * represent a rank, the LSB being rank 0 and the MSB being rank 3. + * Default is to disable ODT during reads. +*/ +#define DWC_DDR_PHY_REGS_ODTCR_RDODT0_MASK 0x0000000F +#define DWC_DDR_PHY_REGS_ODTCR_RDODT0_SHIFT 0 + +#define DWC_DDR_PHY_REGS_ODTCR_RDODT1_MASK 0x000000F0 +#define DWC_DDR_PHY_REGS_ODTCR_RDODT1_SHIFT 4 + +#define DWC_DDR_PHY_REGS_ODTCR_RDODT2_MASK 0x00000F00 +#define DWC_DDR_PHY_REGS_ODTCR_RDODT2_SHIFT 8 + +#define DWC_DDR_PHY_REGS_ODTCR_RDODT3_MASK 0x0000F000 +#define DWC_DDR_PHY_REGS_ODTCR_RDODT3_SHIFT 12 + +/* Register ODTCR field WRODT0 */ +/** + * Write ODT: Specifies whether ODT should be enabled (‘1’) or disabled (‘0’) on + * each of the up to four ranks when a write command is sent to rank n. WRODT0, + * WRODT1, WRODT2, and WRODT3 specify ODT settings when a write is to rank 0, + * rank 1, rank 2, and rank 3, respectively. The four bits of each field each + * represent a rank, the LSB being rank 0 and the MSB being rank 3. + * Default is to enable ODT only on the rank being written to. +*/ +#define DWC_DDR_PHY_REGS_ODTCR_WRODT0_MASK 0x000F0000 +#define DWC_DDR_PHY_REGS_ODTCR_WRODT0_SHIFT 16 + +#define DWC_DDR_PHY_REGS_ODTCR_WRODT1_MASK 0x00F00000 +#define DWC_DDR_PHY_REGS_ODTCR_WRODT1_SHIFT 20 + +#define DWC_DDR_PHY_REGS_ODTCR_WRODT2_MASK 0x0F000000 +#define DWC_DDR_PHY_REGS_ODTCR_WRODT2_SHIFT 24 + +#define DWC_DDR_PHY_REGS_ODTCR_WRODT3_MASK 0xF0000000 +#define DWC_DDR_PHY_REGS_ODTCR_WRODT3_SHIFT 28 + +/* Register DTCR field DTRPTN */ +/** + * Data Training Repeat Number: Repeat number used to confirm stability of DDR + * write or read +*/ +#define DWC_DDR_PHY_REGS_DTCR_DTRPTN_MASK 0x0000000f +#define DWC_DDR_PHY_REGS_DTCR_DTRPTN_SHIFT 0 + +/* Register DTCR field DTRANK */ +/** + * Data Training Rank: Selects the SDRAM rank to be used during data bit deskew + * and eye centering. +*/ +#define DWC_DDR_PHY_REGS_DTCR_DTRANK_MASK 0x00000030 +#define DWC_DDR_PHY_REGS_DTCR_DTRANK_SHIFT 4 + +/* Register DTCR field DTMPR */ +/** + * Data Training Using MPR (DDR3 Only): Specifies, if set, that DQS gate + * training should use the SDRAM Multi-Purpose Register (MPR) register. + * Otherwise data training is performed by first writing to some locations in + * the SDRAM and then reading them back. + */ +#define DWC_DDR_PHY_REGS_DTCR_DTMPR 0x00000040 + +/* Register DTCR field DTDBS */ +/** + * Data Training Debug Byte Select: Selects the byte during data training debug + * mode. + */ +#define DWC_DDR_PHY_REGS_DTCR_DTDBS_MASK 0x000f0000 +#define DWC_DDR_PHY_REGS_DTCR_DTDBS_SHIFT 16 +#define DWC_DDR_PHY_REGS_DTCR_DTDBS(i) \ + ((i) << DWC_DDR_PHY_REGS_DTCR_DTDBS_SHIFT) + +/* Register DTCR field DTEXG */ +/** + * Data Training with Early/Extended Gate: Specifies if set that the DQS gate + * training should be performed with an early/extended gate as specified in + * DSGCR.DQSGX. + */ +#define DWC_DDR_PHY_REGS_DTCR_DTEXG 0x00800000 + +/* Register DTCR field RANKEN */ +/** + * Rank Enable: Specifies the ranks that are enabled for data-training. Bit 0 + * controls rank 0, bit 1 controls rank 1, bit 2 controls rank 2, and bit 3 + * controls rank 3. Setting the bit to '1' enables the rank, and setting it to + * '0' disables the rank.
*/ +#define DWC_DDR_PHY_REGS_DTCR_RANKEN_MASK 0x0f000000 +#define DWC_DDR_PHY_REGS_DTCR_RANKEN_SHIFT 24 + +/* Register DTEDR1 field DTRLMN */ +/* Data Training RDQS LCDL Minimum */ +#define DWC_DDR_PHY_REGS_DTEDR1_DTRLMN_MASK 0x000000ff +#define DWC_DDR_PHY_REGS_DTEDR1_DTRLMN_SHIFT 0 + +/* Register DTEDR1 field DTRLMX */ +/* Data Training RDQS LCDL Maximum */ +#define DWC_DDR_PHY_REGS_DTEDR1_DTRLMX_MASK 0x0000ff00 +#define DWC_DDR_PHY_REGS_DTEDR1_DTRLMX_SHIFT 8 + +/* Register DTEDR1 field DTRBMN */ +/* Data Training Read BDL Shift Minimum */ +#define DWC_DDR_PHY_REGS_DTEDR1_DTRBMN_MASK 0x00ff0000 +#define DWC_DDR_PHY_REGS_DTEDR1_DTRBMN_SHIFT 16 + +/* Register DTEDR1 field DTRBMX */ +/* Data Training Read BDL Shift Maximum */ +#define DWC_DDR_PHY_REGS_DTEDR1_DTRBMX_MASK 0xff000000 +#define DWC_DDR_PHY_REGS_DTEDR1_DTRBMX_SHIFT 24 + +/* Register RDIMMGCR0 field RDIMM */ +/** + * Registered DIMM: Indicates if set that a registered DIMM is used. In this + * case, the PUBm2 increases the SDRAM write and read latencies (WL/RL) by 1 + * and also enforces that accesses adhere to the RDIMM buffer chip. This only + * applies to PUBm2 internal SDRAM transactions. Transactions generated by the + * controller must make their own adjustments to WL/RL when using a registered + * DIMM. The DCR.NOSRA register bit must be set to '1' if using the standard + * RDIMM buffer chip so that normal DRAM accesses do not assert multiple chip + * select bits at the same time. + */ +#define DWC_DDR_PHY_REGS_RDIMMGCR0_RDIMM 0x00000001 + +/* Register ODTCTLR field FRCEN */ +/** + * ODT force value enable: when this field is set, the ODT + * value is taken from the FRCVAL field. + * One bit for each rank. + */ +#define DWC_DDR_PHY_REGS_ODTCTLR_FRCEN_MASK 0x0000000f +#define DWC_DDR_PHY_REGS_ODTCTLR_FRCEN_SHIFT 0 + +/* Register ODTCTLR field FRCVAL */ +/** + * ODT force value: when the FRCEN field is set, the ODT + * value is taken from this field. + * One bit for each rank. + */ +#define DWC_DDR_PHY_REGS_ODTCTLR_FRCVAL_MASK 0x000000f0 +#define DWC_DDR_PHY_REGS_ODTCTLR_FRCVAL_SHIFT 4 + +/* Register BISTRR field BINST */ +/** +Selects the BIST instruction to be executed: Valid values are: +000 = NOP: No operation +001 = Run: Triggers the running of the BIST. +010 = Stop: Stops the running of the BIST. +011 = Reset: Resets all BIST run-time registers, such as error counters. +100 – 111 Reserved + */ +#define DWC_DDR_PHY_REGS_BISTRR_BINST_MASK 0x00000007 +#define DWC_DDR_PHY_REGS_BISTRR_BINST_SHIFT 0 +#define DWC_DDR_PHY_REGS_BISTRR_BINST_NOP \ + (0x0 << DWC_DDR_PHY_REGS_BISTRR_BINST_SHIFT) +#define DWC_DDR_PHY_REGS_BISTRR_BINST_RUN \ + (0x1 << DWC_DDR_PHY_REGS_BISTRR_BINST_SHIFT) +#define DWC_DDR_PHY_REGS_BISTRR_BINST_STOP \ + (0x2 << DWC_DDR_PHY_REGS_BISTRR_BINST_SHIFT) +#define DWC_DDR_PHY_REGS_BISTRR_BINST_RESET \ + (0x3 << DWC_DDR_PHY_REGS_BISTRR_BINST_SHIFT) + +/* Register BISTRR field BMODE */ +/** +BIST Mode: Selects the mode in which BIST is run. Valid values are: +0 = Loopback mode: Address, commands and data loop back at the PHY I/Os. +1 = DRAM mode: Address, commands and data go to DRAM for normal memory +accesses. +*/ +#define DWC_DDR_PHY_REGS_BISTRR_BMODE_MASK 0x00000008 +#define DWC_DDR_PHY_REGS_BISTRR_BMODE_SHIFT 3 +#define DWC_DDR_PHY_REGS_BISTRR_BMODE_LOOPBACK \ + (0x0 << DWC_DDR_PHY_REGS_BISTRR_BMODE_SHIFT) +#define DWC_DDR_PHY_REGS_BISTRR_BMODE_DRAM \ + (0x1 << DWC_DDR_PHY_REGS_BISTRR_BMODE_SHIFT) + +/* Register BISTRR field BDXEN */ +/** + * DATX8 Enable: Enables the running of BIST on the data byte lane PHYs.
+ * This bit is exclusive with BACEN, i.e. both cannot be set to '1' at the same + * time. + */ +#define DWC_DDR_PHY_REGS_BISTRR_BDXEN 0x00004000 + +/* Register BISTRR field BACEN */ +/** + * BIST AC Enable: Enables the running of BIST on the address/command lane PHY. + * This bit is exclusive with BDXEN, i.e. both cannot be set to '1' at the same + * time. + */ +#define DWC_DDR_PHY_REGS_BISTRR_BACEN 0x00008000 + +/* Register BISTRR field BDPAT */ +/** +BIST Data Pattern: Selects the data pattern used during BIST. Valid values are: +00 = Walking 0 +01 = Walking 1 +10 = LFSR-based pseudo-random +11 = User programmable (Not valid for AC loopback). +*/ +#define DWC_DDR_PHY_REGS_BISTRR_BDPAT_MASK 0x00060000 +#define DWC_DDR_PHY_REGS_BISTRR_BDPAT_SHIFT 17 +#define DWC_DDR_PHY_REGS_BISTRR_BDPAT_WALKING_0 \ + (0x0 << DWC_DDR_PHY_REGS_BISTRR_BDPAT_SHIFT) +#define DWC_DDR_PHY_REGS_BISTRR_BDPAT_WALKING_1 \ + (0x1 << DWC_DDR_PHY_REGS_BISTRR_BDPAT_SHIFT) +#define DWC_DDR_PHY_REGS_BISTRR_BDPAT_LFSR \ + (0x2 << DWC_DDR_PHY_REGS_BISTRR_BDPAT_SHIFT) +#define DWC_DDR_PHY_REGS_BISTRR_BDPAT_USER \ + (0x3 << DWC_DDR_PHY_REGS_BISTRR_BDPAT_SHIFT) + +/* Register BISTRR field BDXSEL */ +/** +BIST DATX8 Select: Select the byte lane for comparison of loopback/read data. +Valid values are 0 to 8. +*/ +#define DWC_DDR_PHY_REGS_BISTRR_BDXSEL_MASK 0x00780000 +#define DWC_DDR_PHY_REGS_BISTRR_BDXSEL_SHIFT 19 +#define DWC_DDR_PHY_REGS_BISTRR_BDXSEL(val) \ + ((val) << DWC_DDR_PHY_REGS_BISTRR_BDXSEL_SHIFT) + +/* Register BISTWCR field BWCNT */ +/** +BIST Word Count: Indicates the number of words to generate during BIST. This +must be a multiple of DRAM burst length (BL) divided by 2, e.g. for BL=8, valid +values are 4, 8, 12, 16, and so on. +*/ +#define DWC_DDR_PHY_REGS_BISTWCR_BWCNT_MASK 0x0000ffff +#define DWC_DDR_PHY_REGS_BISTWCR_BWCNT_SHIFT 0 +#define DWC_DDR_PHY_REGS_BISTWCR_BWCNT(cnt) \ + ((cnt) << DWC_DDR_PHY_REGS_BISTWCR_BWCNT_SHIFT) + +/* Register BISTAR0 field BCOL */ +/** + * BIST Column Address: Selects the SDRAM column address to be used during + * BIST. The lower bits of this address must be "0000" for BL16, "000" for BL8, + * "00" for BL4 and "0" for BL2. + */ +#define DWC_DDR_PHY_REGS_BISTAR0_BCOL_MASK 0x00000fff +#define DWC_DDR_PHY_REGS_BISTAR0_BCOL_SHIFT 0 +#define DWC_DDR_PHY_REGS_BISTAR0_BCOL(val) \ + ((val) << DWC_DDR_PHY_REGS_BISTAR0_BCOL_SHIFT) + +/* Register BISTAR0 field BROW */ +/** +BIST Row Address: Selects the SDRAM row address to be used during BIST +*/ +#define DWC_DDR_PHY_REGS_BISTAR0_BROW_MASK 0x0ffff000 +#define DWC_DDR_PHY_REGS_BISTAR0_BROW_SHIFT 12 +#define DWC_DDR_PHY_REGS_BISTAR0_BROW(val) \ + ((val) << DWC_DDR_PHY_REGS_BISTAR0_BROW_SHIFT) + +/* Register BISTAR0 field BBANK */ +/** +BIST Bank Address: Selects the SDRAM bank address to be used during BIST. +*/ +#define DWC_DDR_PHY_REGS_BISTAR0_BBANK_MASK 0x70000000 +#define DWC_DDR_PHY_REGS_BISTAR0_BBANK_SHIFT 28 +#define DWC_DDR_PHY_REGS_BISTAR0_BBANK(val) \ + ((val) << DWC_DDR_PHY_REGS_BISTAR0_BBANK_SHIFT) + +/* Register BISTAR1 field BRANK */ +/** +BIST Rank: Selects the SDRAM rank to be used during BIST. Valid values range +from 0 to maximum ranks minus 1. +*/ +#define DWC_DDR_PHY_REGS_BISTAR1_BRANK_MASK 0x00000003 +#define DWC_DDR_PHY_REGS_BISTAR1_BRANK_SHIFT 0 +#define DWC_DDR_PHY_REGS_BISTAR1_BRANK(val) \ + ((val) << DWC_DDR_PHY_REGS_BISTAR1_BRANK_SHIFT) + +/* Register BISTAR1 field BMRANK */ +/** +BIST Maximum Rank: Specifies the maximum SDRAM rank to be used during BIST. +The default value is set to maximum ranks minus 1. 
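*/
+
+/*
+ * Illustrative aside, not part of the original header: the BINST encoding
+ * above implies the usual reset-then-run flow, polling the BISTGSR.BDONE
+ * bit defined further below. A hedged sketch, assuming a mapped
+ * struct al_ddr_phy_regs __iomem *phy and the kernel's readl()/writel():
+ *
+ *	writel(DWC_DDR_PHY_REGS_BISTRR_BINST_RESET, &phy->BISTRR);
+ *	writel(DWC_DDR_PHY_REGS_BISTRR_BINST_RUN |
+ *	       DWC_DDR_PHY_REGS_BISTRR_BMODE_DRAM |
+ *	       DWC_DDR_PHY_REGS_BISTRR_BDPAT_LFSR |
+ *	       DWC_DDR_PHY_REGS_BISTRR_BDXEN, &phy->BISTRR);
+ *	while (!(readl(&phy->BISTGSR) & DWC_DDR_PHY_REGS_BISTGSR_BDONE))
+ *		cpu_relax();
+ */
+
+/*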
+*/ +#define DWC_DDR_PHY_REGS_BISTAR1_BMRANK_MASK 0x0000000c +#define DWC_DDR_PHY_REGS_BISTAR1_BMRANK_SHIFT 2 +#define DWC_DDR_PHY_REGS_BISTAR1_BMRANK(val) \ + ((val) << DWC_DDR_PHY_REGS_BISTAR1_BMRANK_SHIFT) + +/* Register BISTAR1 field BAINC */ +/** + * BIST Address Increment: Selects the value by which the SDRAM address is + * incremented for each write/read access. This value must be at the beginning + * of a burst boundary, i.e. the lower bits must be "0000" for BL16, "000" for + * BL8, "00" for BL4 and "0" for BL2. +*/ +#define DWC_DDR_PHY_REGS_BISTAR1_BAINC_MASK 0x0000fff0 +#define DWC_DDR_PHY_REGS_BISTAR1_BAINC_SHIFT 4 +#define DWC_DDR_PHY_REGS_BISTAR1_BAINC(val) \ + ((val) << DWC_DDR_PHY_REGS_BISTAR1_BAINC_SHIFT) + +/* Register BISTAR2 field BMCOL */ +/** +BIST Maximum Column Address: Specifies the maximum SDRAM column address +to be used during BIST before the address increments to the next row. +*/ +#define DWC_DDR_PHY_REGS_BISTAR2_BMCOL_MASK 0x00000fff +#define DWC_DDR_PHY_REGS_BISTAR2_BMCOL_SHIFT 0 +#define DWC_DDR_PHY_REGS_BISTAR2_BMCOL(val) \ + ((val) << DWC_DDR_PHY_REGS_BISTAR2_BMCOL_SHIFT) + +/* Register BISTAR2 field BMROW */ +/** +BIST Maximum Row Address: Specifies the maximum SDRAM row address to be +used during BIST before the address increments to the next bank. +*/ +#define DWC_DDR_PHY_REGS_BISTAR2_BMROW_MASK 0x0ffff000 +#define DWC_DDR_PHY_REGS_BISTAR2_BMROW_SHIFT 12 +#define DWC_DDR_PHY_REGS_BISTAR2_BMROW(val) \ + ((val) << DWC_DDR_PHY_REGS_BISTAR2_BMROW_SHIFT) + +/* Register BISTAR2 field BMBANK */ +/** +BIST Maximum Bank Address: Specifies the maximum SDRAM bank address to be +used during BIST before the address increments to the next rank. +*/ +#define DWC_DDR_PHY_REGS_BISTAR2_BMBANK_MASK 0x70000000 +#define DWC_DDR_PHY_REGS_BISTAR2_BMBANK_SHIFT 28 +#define DWC_DDR_PHY_REGS_BISTAR2_BMBANK(val) \ + ((val) << DWC_DDR_PHY_REGS_BISTAR2_BMBANK_SHIFT) + +/* Register BISTUDPR field BUDP0 */ +/** + * BIST User Data Pattern 0: Data to be applied on even DQ pins during BIST. + */ +#define DWC_DDR_PHY_REGS_BISTUDPR_BUDP0_MASK 0x0000ffff +#define DWC_DDR_PHY_REGS_BISTUDPR_BUDP0_SHIFT 0 +#define DWC_DDR_PHY_REGS_BISTUDPR_BUDP0(val) \ + ((val) << DWC_DDR_PHY_REGS_BISTUDPR_BUDP0_SHIFT) + +/* Register BISTUDPR field BUDP1 */ +/** + * BIST User Data Pattern 1: Data to be applied on odd DQ pins during BIST. + */ +#define DWC_DDR_PHY_REGS_BISTUDPR_BUDP1_MASK 0xffff0000 +#define DWC_DDR_PHY_REGS_BISTUDPR_BUDP1_SHIFT 16 +#define DWC_DDR_PHY_REGS_BISTUDPR_BUDP1(val) \ + ((val) << DWC_DDR_PHY_REGS_BISTUDPR_BUDP1_SHIFT) + +/* Register BISTGSR field BDONE */ +/** +BIST Done: Indicates if set that the BIST has finished executing. This bit is reset to +zero when BIST is triggered. +*/ +#define DWC_DDR_PHY_REGS_BISTGSR_BDONE 0x00000001 + +/* Register BISTGSR field BACERR */ +/** +BIST Address/Command Error: indicates if set that there is a data comparison error +in the address/command lane. +*/ +#define DWC_DDR_PHY_REGS_BISTGSR_BACERR 0x00000002 + +/* Register BISTGSR field BDXERR */ +/** +BIST Data Error: indicates if set that there is a data comparison error in the byte +lane. +*/ +#define DWC_DDR_PHY_REGS_BISTGSR_BDXERR 0x00000004 + +/* Register DXnGCR0 field DXEN */ +/** + * Data Byte Enable: Enables if set the data byte. Setting this bit to ‘0’ + * disables the byte, i.e. the byte is not used in PHY initialization or + * training and is ignored during SDRAM read/write operations. 
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_DXEN	0x00000001
+
+/* Register DXnGCR0 field DQSGOE */
+/**
+DQSG Output Enable: Enables, when set, the output driver (OE pin) on the I/O
+for DQS gate.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_DQSGOE	0x00000004
+
+/* Register DXnGCR0 field DQSGODT */
+/**
+DQSG On-Die Termination: Enables, when set, the on-die termination (TE pin) on
+the I/O for DQS gate. Note that in typical usage, DQSGOE will always be on,
+rendering this control bit meaningless.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_DQSGODT	0x00000008
+
+/* Register DXnGCR0 field DQSGPDD */
+/**
+DQSG Power Down Driver: Powers down, if set, the output driver on the I/O for
+DQS gate. This bit is ORed with the common PDD configuration bit.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_DQSGPDD	0x00000010
+
+/* Register DXnGCR0 field DQSGPDR */
+/**
+DQSG Power Down Receiver: Powers down, if set, the input receiver on the I/O
+for DQS gate. This bit is ORed with the common PDR configuration bit.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_DQSGPDR	0x00000020
+
+/* Register DXnGCR0 field DQSRPD */
+/**
+DQSR Power Down: Powers down, if set, the PDQSR cell. This bit is ORed with the
+common PDR configuration bit.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_DQSRPD	0x00000040
+
+/* Register DXnGCR0 field PLLPD */
+/**
+ * PLL Power Down: Puts the byte PLL in power down mode by driving the PLL
+ * power down pin. This bit is not self-clearing and a '0' must be written to
+ * de-assert the power-down. This bit is ORed with the global PLLPD
+ * configuration bit.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_PLLPD	0x00020000
+
+/* Register DXnGCR0 field WLRKEN */
+/* Write Level Rank Enable: Specifies the ranks that should be write leveled
+ * for this byte. Write leveling responses from ranks that are not enabled for
+ * write leveling for a particular byte are ignored and write leveling is
+ * flagged as done for these ranks. WLRKEN[0] enables rank 0, [1] enables rank
+ * 1, [2] enables rank 2, and [3] enables rank 3.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_WLRKEN_MASK 0x3c000000
+#define DWC_DDR_PHY_REGS_DXNGCR0_WLRKEN_SHIFT 26
+
+/* Register DXnGCR0 field MDLEN */
+/**
+Master Delay Line Enable: Enables, if set, the DATX8 master delay line
+calibration to perform subsequent period measurements following the initial
+period measurements that are performed after reset or when calibration is
+manually triggered. These additional measurements are accumulated and filtered
+as long as this bit remains high. This bit is combined with the common DATX8
+MDL enable bit.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_MDLEN	0x40000000
+
+/* Register DXnGCR0 field CALBYP */
+/**
+Calibration Bypass: Prevents, if set, period measurement calibration from
+automatically triggering after PHY initialization.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_CALBYP	0x80000000
+
+/* Register DXnGCR3 field DSPDRMODE */
+/**
+ * Enables the PDR mode values for DQS. 
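+ * (PDR here is the power-down control of the DQS input receiver; compare the
+ * per-byte DQSGPDR bit above.)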
+ * 00 : PDR Dynamic
+ * 01 : PDR always ON
+ * 10 : PDR always OFF
+ * 11 : Reserved
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_MASK	0x0000000c
+#define DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_SHIFT	2
+#define DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_PDR_DYNAMIC \
+	(0x0 << DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_SHIFT)
+#define DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_PDR_ALWAYS_ON \
+	(0x1 << DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_SHIFT)
+#define DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_PDR_ALWAYS_OFF \
+	(0x2 << DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_SHIFT)
+
+/* Register DXnLCDLR0 field R0WLD */
+/** Rank 0 Write Leveling Delay: Rank 0 delay select for the write leveling
+ * (WL) LCDL
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R0WLD_MASK	0x000000ff
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R0WLD_SHIFT	0
+
+/* Register DXnLCDLR0 field R1WLD */
+/** Rank 1 Write Leveling Delay: Rank 1 delay select for the write leveling
+ * (WL) LCDL
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R1WLD_MASK	0x0000ff00
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R1WLD_SHIFT	8
+
+/* Register DXnLCDLR0 field R2WLD */
+/** Rank 2 Write Leveling Delay: Rank 2 delay select for the write leveling
+ * (WL) LCDL
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R2WLD_MASK	0x00ff0000
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R2WLD_SHIFT	16
+
+/* Register DXnLCDLR0 field R3WLD */
+/** Rank 3 Write Leveling Delay: Rank 3 delay select for the write leveling
+ * (WL) LCDL
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R3WLD_MASK	0xff000000
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R3WLD_SHIFT	24
+
+/* Register DXnLCDLR1 field WDQD */
+/* Write Data Delay: Delay select for the write data (WDQ) LCDL. */
+#define DWC_DDR_PHY_REGS_DXNLCDLR1_WDQD_MASK	0x000000ff
+#define DWC_DDR_PHY_REGS_DXNLCDLR1_WDQD_SHIFT	0
+
+/* Register DXnLCDLR1 field RDQSD */
+/* Read DQS Delay: Delay select for the read DQS (RDQS) LCDL. */
+#define DWC_DDR_PHY_REGS_DXNLCDLR1_RDQSD_MASK	0x0000ff00
+#define DWC_DDR_PHY_REGS_DXNLCDLR1_RDQSD_SHIFT	8
+
+/* Register DXnLCDLR1 field RDQSND */
+/* Read DQSN Delay: Delay select for the read DQSN (RDQS) LCDL. */
+#define DWC_DDR_PHY_REGS_DXNLCDLR1_RDQSND_MASK	0x00ff0000
+#define DWC_DDR_PHY_REGS_DXNLCDLR1_RDQSND_SHIFT	16
+
+/* Register DXnLCDLR2 field R0DQSGD */
+/** Rank 0 Read DQS Gating Delay: Rank 0 delay select for the read DQS gating
+ * (DQSG) LCDL.
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R0DQSGD_MASK		0x000000ff
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R0DQSGD_SHIFT	0
+
+/* Register DXnLCDLR2 field R1DQSGD */
+/** Rank 1 Read DQS Gating Delay: Rank 1 delay select for the read DQS gating
+ * (DQSG) LCDL.
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R1DQSGD_MASK		0x0000ff00
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R1DQSGD_SHIFT	8
+
+/* Register DXnLCDLR2 field R2DQSGD */
+/** Rank 2 Read DQS Gating Delay: Rank 2 delay select for the read DQS gating
+ * (DQSG) LCDL.
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R2DQSGD_MASK		0x00ff0000
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R2DQSGD_SHIFT	16
+
+/* Register DXnLCDLR2 field R3DQSGD */
+/** Rank 3 Read DQS Gating Delay: Rank 3 delay select for the read DQS gating
+ * (DQSG) LCDL.
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R3DQSGD_MASK		0xff000000
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R3DQSGD_SHIFT	24
+
+/* Register DXnGTR field R0DGSL */
+/** Rank n DQS Gating System Latency: This is used to increase the number of clock
+ * cycles needed to expect valid DDR read data by up to seven extra clock cycles.
+ * This is used to compensate for board delays and other system delays. Power-up
+ * default is 000 (i.e. no extra clock cycles required). 
The SL fields are initially set by
+ * the PUBm2 during automatic DQS data training but these values can be
+ * overwritten by a direct write to this register. Every three bits of this register control
+ * the latency of each of the (up to) four ranks. R0DGSL controls the latency of rank
+ * 0, R1DGSL controls rank 1, and so on. Valid values are 0 to 7.
+ */
+#define DWC_DDR_PHY_REGS_DXNGTR_R0DGSL_MASK	0x00000007
+#define DWC_DDR_PHY_REGS_DXNGTR_R0DGSL_SHIFT	0
+
+/* Register DXnGTR field R1DGSL */
+/** Rank n DQS Gating System Latency: This is used to increase the number of clock
+ * cycles needed to expect valid DDR read data by up to seven extra clock cycles.
+ * This is used to compensate for board delays and other system delays. Power-up
+ * default is 000 (i.e. no extra clock cycles required). The SL fields are initially set by
+ * the PUBm2 during automatic DQS data training but these values can be
+ * overwritten by a direct write to this register. Every three bits of this register control
+ * the latency of each of the (up to) four ranks. R0DGSL controls the latency of rank
+ * 0, R1DGSL controls rank 1, and so on. Valid values are 0 to 7.
+ */
+#define DWC_DDR_PHY_REGS_DXNGTR_R1DGSL_MASK	0x00000038
+#define DWC_DDR_PHY_REGS_DXNGTR_R1DGSL_SHIFT	3
+
+/* Register DXnGTR field R2DGSL */
+/** Rank n DQS Gating System Latency: This is used to increase the number of clock
+ * cycles needed to expect valid DDR read data by up to seven extra clock cycles.
+ * This is used to compensate for board delays and other system delays. Power-up
+ * default is 000 (i.e. no extra clock cycles required). The SL fields are initially set by
+ * the PUBm2 during automatic DQS data training but these values can be
+ * overwritten by a direct write to this register. Every three bits of this register control
+ * the latency of each of the (up to) four ranks. R0DGSL controls the latency of rank
+ * 0, R1DGSL controls rank 1, and so on. Valid values are 0 to 7.
+ */
+#define DWC_DDR_PHY_REGS_DXNGTR_R2DGSL_MASK	0x000001c0
+#define DWC_DDR_PHY_REGS_DXNGTR_R2DGSL_SHIFT	6
+
+/* Register DXnGTR field R3DGSL */
+/** Rank n DQS Gating System Latency: This is used to increase the number of clock
+ * cycles needed to expect valid DDR read data by up to seven extra clock cycles.
+ * This is used to compensate for board delays and other system delays. Power-up
+ * default is 000 (i.e. no extra clock cycles required). The SL fields are initially set by
+ * the PUBm2 during automatic DQS data training but these values can be
+ * overwritten by a direct write to this register. Every three bits of this register control
+ * the latency of each of the (up to) four ranks. R0DGSL controls the latency of rank
+ * 0, R1DGSL controls rank 1, and so on. Valid values are 0 to 7.
+ */
+#define DWC_DDR_PHY_REGS_DXNGTR_R3DGSL_MASK	0x00000e00
+#define DWC_DDR_PHY_REGS_DXNGTR_R3DGSL_SHIFT	9
+
+/* Register DXnGTR field R0WLSL */
+/** Rank n Write Leveling System Latency: This is used to adjust the write latency
+ * after write leveling. Power-up default is 01 (i.e. no extra clock cycles required). The
+ * SL fields are initially set by the PUBm2 during automatic write leveling but these
+ * values can be overwritten by a direct write to this register. Every two bits of this
+ * register control the latency of each of the (up to) four ranks. R0WLSL controls the
+ * latency of rank 0, R1WLSL controls rank 1, and so on. 
Valid values: + * 00 = Write latency = WL - 1 + * 01 = Write latency = WL + * 10 = Write latency = WL + 1 + * 11 = Reserved + */ +#define DWC_DDR_PHY_REGS_DXNGTR_R0WLSL_MASK 0x00003000 +#define DWC_DDR_PHY_REGS_DXNGTR_R0WLSL_SHIFT 12 + +/* Register DXnGTR field R1WLSL */ +/** Rank n Write Leveling System Latency: This is used to adjust the write latency + * after write leveling. Power-up default is 01 (i.e. no extra clock cycles required). The + * SL fields are initially set by the PUBm2 during automatic write leveling but these + * values can be overwritten by a direct write to this register. Every two bits of this + * register control the latency of each of the (up to) four ranks. R0WLSL controls the + * latency of rank 0, R1WLSL controls rank 1, and so on. Valid values: + * 00 = Write latency = WL - 1 + * 01 = Write latency = WL + * 10 = Write latency = WL + 1 + * 11 = Reserved + */ +#define DWC_DDR_PHY_REGS_DXNGTR_R1WLSL_MASK 0x0000c000 +#define DWC_DDR_PHY_REGS_DXNGTR_R1WLSL_SHIFT 14 + +/* Register DXnGTR field R2WLSL */ +/** Rank n Write Leveling System Latency: This is used to adjust the write latency + * after write leveling. Power-up default is 01 (i.e. no extra clock cycles required). The + * SL fields are initially set by the PUBm2 during automatic write leveling but these + * values can be overwritten by a direct write to this register. Every two bits of this + * register control the latency of each of the (up to) four ranks. R0WLSL controls the + * latency of rank 0, R1WLSL controls rank 1, and so on. Valid values: + * 00 = Write latency = WL - 1 + * 01 = Write latency = WL + * 10 = Write latency = WL + 1 + * 11 = Reserved + */ +#define DWC_DDR_PHY_REGS_DXNGTR_R2WLSL_MASK 0x00030000 +#define DWC_DDR_PHY_REGS_DXNGTR_R2WLSL_SHIFT 16 + +/* Register DXnGTR field R3WLSL */ +/** Rank n Write Leveling System Latency: This is used to adjust the write latency + * after write leveling. Power-up default is 01 (i.e. no extra clock cycles required). The + * SL fields are initially set by the PUBm2 during automatic write leveling but these + * values can be overwritten by a direct write to this register. Every two bits of this + * register control the latency of each of the (up to) four ranks. R0WLSL controls the + * latency of rank 0, R1WLSL controls rank 1, and so on. Valid values: + * 00 = Write latency = WL - 1 + * 01 = Write latency = WL + * 10 = Write latency = WL + 1 + * 11 = Reserved + */ +#define DWC_DDR_PHY_REGS_DXNGTR_R3WLSL_MASK 0x000c0000 +#define DWC_DDR_PHY_REGS_DXNGTR_R3WLSL_SHIFT 18 + +#ifdef __cplusplus +} +#endif + +#endif + +/** @} end of DDR group */ + diff --git a/target/linux/alpine/files/drivers/edac/al/al_hal_ddr_utils.h b/target/linux/alpine/files/drivers/edac/al/al_hal_ddr_utils.h new file mode 100644 index 00000000000000..d96090d3204173 --- /dev/null +++ b/target/linux/alpine/files/drivers/edac/al/al_hal_ddr_utils.h @@ -0,0 +1,253 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. 
+ +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @addtogroup groupddr + * + * @{ + * @file al_hal_ddr_utils.h + * + */ + +#ifndef __AL_HAL_DDR_UTILS_H__ +#define __AL_HAL_DDR_UTILS_H__ + +#include +#include +#include "al_hal_ddr_ctrl_regs.h" +#include "al_hal_ddr_phy_regs.h" + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +/* Default timeout for register polling operations */ +#define DEFAULT_TIMEOUT 5000 + +#if 0 /* Check if masked required */ +#define _al_reg_write32_masked(reg, mask, data) \ + __al_reg_write32_masked(__LINE__, reg, mask, data) + +#define ___al_reg_write32_masked(reg, mask, data) \ + ____al_reg_write32_masked(__LINE__, reg, mask, data) + +void __al_reg_write32_masked( + int line, + uint32_t *reg, + uint32_t mask, + uint32_t data) +{ + if (!(al_reg_read32(reg) & ~mask)) + al_info("%d can be non masked!\n", line); + al_reg_write32_masked(reg, mask, data); +} + +void ____al_reg_write32_masked( + int line, + uint32_t *reg, + uint32_t mask, + uint32_t data) +{ + if (al_reg_read32(reg) & ~mask) + al_info("%d can not be non masked!\n", line); + al_reg_write32_masked(reg, mask, data); +} +#else +#define _al_reg_write32_masked(reg, mask, data) \ + al_reg_write32_masked(reg, mask, data) + +static inline void ___al_reg_write32_masked( + uint32_t *reg, + uint32_t mask __attribute__((__unused__)), + uint32_t data) +{ + al_reg_write32(reg, data); +} +#endif + +/******************************************************************************* + ******************************************************************************/ +static int al_ddr_reg_poll32( + uint32_t __iomem *reg, + uint32_t mask, + uint32_t data, + unsigned int timeout) +{ + while ((al_reg_read32(reg) & mask) != data) { + if (timeout) { + al_udelay(1); + timeout--; + } else { + return -ETIME; + } + } + + return 0; +} + +/******************************************************************************* + 
 ******************************************************************************/
+/* Wait for controller normal operating mode */
+static int al_ddr_ctrl_wait_for_normal_operating_mode(
+	struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs)
+{
+	int err;
+
+	err = al_ddr_reg_poll32(
+		&ctrl_regs->stat,
+		DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_MASK,
+		DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_NORMAL,
+		DEFAULT_TIMEOUT);
+
+	if (err) {
+		al_err("%s: al_ddr_reg_poll32 failed!\n", __func__);
+		return err;
+	}
+
+	return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+/**
+ * Disabling VT calculation
+ * VT calculation must be disabled during writes to the delay line registers
+ */
+static int al_ddr_phy_vt_calc_disable(
+	struct al_ddr_phy_regs __iomem *phy_regs)
+{
+	int err;
+
+	_al_reg_write32_masked(
+		&phy_regs->PGCR[1],
+		DWC_DDR_PHY_REGS_PGCR1_INHVT,
+		DWC_DDR_PHY_REGS_PGCR1_INHVT);
+
+	err = al_ddr_reg_poll32(
+		&phy_regs->PGSR[1],
+		DWC_DDR_PHY_REGS_PGSR1_VTSTOP,
+		DWC_DDR_PHY_REGS_PGSR1_VTSTOP,
+		DEFAULT_TIMEOUT);
+
+	if (err) {
+		al_err("%s: al_ddr_reg_poll32 failed!\n", __func__);
+		return err;
+	}
+
+	return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+/* Enabling VT calculation */
+static void al_ddr_phy_vt_calc_enable(
+	struct al_ddr_phy_regs __iomem *phy_regs)
+{
+	_al_reg_write32_masked(
+		&phy_regs->PGCR[1],
+		DWC_DDR_PHY_REGS_PGCR1_INHVT,
+		0);
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+/* Stop DDR controller access to the PHY */
+static inline void al_ddr_ctrl_stop(
+	void __iomem *ddr_ctrl_regs_base)
+{
+	struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+		&((struct al_ddr_ctrl_regs __iomem *)
+		ddr_ctrl_regs_base)->umctl2_regs;
+
+	___al_reg_write32_masked(
+		&ctrl_regs->dfimisc,
+		DWC_DDR_UMCTL2_REGS_DFIMISC_DFI_INIT_COMPLETE_EN,
+		0);
+
+	/* Stop controller refresh and ZQ calibration commands */
+	_al_reg_write32_masked(
+		&ctrl_regs->rfshctl3,
+		DWC_DDR_UMCTL2_REGS_RFSHCTL3_DIS_AUTO_REFRESH,
+		DWC_DDR_UMCTL2_REGS_RFSHCTL3_DIS_AUTO_REFRESH);
+
+	_al_reg_write32_masked(
+		&ctrl_regs->zqctl0,
+		DWC_DDR_UMCTL2_REGS_ZQCTL0_DIS_AUTO_ZQ,
+		DWC_DDR_UMCTL2_REGS_ZQCTL0_DIS_AUTO_ZQ);
+
+	al_data_memory_barrier();
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+/* Resume DDR controller access to the PHY */
+static inline void al_ddr_ctrl_resume(
+	void __iomem *ddr_ctrl_regs_base)
+{
+	struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+		&((struct al_ddr_ctrl_regs __iomem *)
+		ddr_ctrl_regs_base)->umctl2_regs;
+
+	___al_reg_write32_masked(
+		&ctrl_regs->dfimisc,
+		DWC_DDR_UMCTL2_REGS_DFIMISC_DFI_INIT_COMPLETE_EN,
+		DWC_DDR_UMCTL2_REGS_DFIMISC_DFI_INIT_COMPLETE_EN);
+
+	/* Resume controller refresh and ZQ calibration commands */
+	_al_reg_write32_masked(
+		&ctrl_regs->rfshctl3,
+		DWC_DDR_UMCTL2_REGS_RFSHCTL3_DIS_AUTO_REFRESH,
+		0);
+
+	_al_reg_write32_masked(
+		&ctrl_regs->zqctl0,
+		DWC_DDR_UMCTL2_REGS_ZQCTL0_DIS_AUTO_ZQ,
+		0);
+
+	al_data_memory_barrier();
+
+	al_ddr_ctrl_wait_for_normal_operating_mode(ctrl_regs);
+}
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** 
@} end of DDR group */
+#endif
+
diff --git a/target/linux/alpine/files/drivers/edac/al/al_mc_edac_core.c b/target/linux/alpine/files/drivers/edac/al/al_mc_edac_core.c
new file mode 100644
index 00000000000000..e48de158366ec5
--- /dev/null
+++ b/target/linux/alpine/files/drivers/edac/al/al_mc_edac_core.c
@@ -0,0 +1,603 @@
+/*
+ * Copyright 2013 Annapurna Labs Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include <mach/al_fabric.h>	/* al_fabric_get_cause_irq() */
+#include "al_hal_ddr.h"
+
+#include "../edac_core.h"
+#include "../edac_module.h"
+
+
+#define DEVICE_ATTR_FLAGS	(S_IRUGO | S_IWUSR)
+
+struct inject_addr {
+	int col;
+	int rank;
+	int row;
+	int bank;
+};
+
+struct al_mc_drvdata {
+	struct mem_ctl_info *mci;
+	void __iomem *vbase;
+	struct inject_addr inject;
+	int inject_enabled;
+	struct delayed_work handle_corr_err_work;
+	struct delayed_work handle_uncorr_err_work;
+};
+
+/* Memory Controller error handler */
+static irqreturn_t al_mc_corr_err_handler(int irq, void *dev_id)
+{
+	struct mem_ctl_info *mci = dev_id;
+	struct al_mc_drvdata *drvdata = mci->pvt_info;
+
+	schedule_delayed_work(&drvdata->handle_corr_err_work,
+			msecs_to_jiffies(1));
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t al_mc_uncorr_err_handler(int irq, void *dev_id)
+{
+	struct mem_ctl_info *mci = dev_id;
+	struct al_mc_drvdata *drvdata = mci->pvt_info;
+
+	schedule_delayed_work(&drvdata->handle_uncorr_err_work,
+			msecs_to_jiffies(1));
+
+	return IRQ_HANDLED;
+}
+
+static void al_mc_corr_err_work(struct work_struct *work)
+{
+	struct al_mc_drvdata *drvdata = container_of(work, struct al_mc_drvdata,
+			handle_corr_err_work.work);
+	struct mem_ctl_info *mci = drvdata->mci;
+	struct al_ddr_ecc_status ecc_corr_status;
+	al_phys_addr_t err_addr;
+
+	/* get the ecc status */
+	al_ddr_ecc_status_get(drvdata->vbase,
+			&ecc_corr_status, NULL);
+
+	al_ddr_ecc_corr_count_clear(drvdata->vbase);
+
+	al_ddr_address_translate_dram2sys(drvdata->vbase, &err_addr,
+			ecc_corr_status.rank, ecc_corr_status.bank,
+			ecc_corr_status.col, ecc_corr_status.row);
+
+	/* log the error as page frame number + offset within the page */
+	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+			ecc_corr_status.err_cnt,
+			err_addr >> PAGE_SHIFT, err_addr & ~PAGE_MASK,
+			ecc_corr_status.syndromes_31_0,
+			ecc_corr_status.rank, -1, -1, mci->ctl_name, "");
+
+	al_ddr_ecc_corr_int_clear(NULL, drvdata->vbase);
+}
+
+static void al_mc_uncorr_err_work(struct work_struct *work)
+{
+	struct al_mc_drvdata *drvdata = container_of(work, struct al_mc_drvdata,
+			handle_uncorr_err_work.work);
+	struct mem_ctl_info *mci = drvdata->mci;
+	struct al_ddr_ecc_status ecc_uncorr_status;
+	al_phys_addr_t err_addr;
+
+	/* get the ecc status */
+	al_ddr_ecc_status_get(drvdata->vbase,
+			NULL, &ecc_uncorr_status);
+
+	al_ddr_ecc_uncorr_count_clear(drvdata->vbase);
+
+	al_ddr_address_translate_dram2sys(drvdata->vbase, &err_addr,
+			ecc_uncorr_status.rank, ecc_uncorr_status.bank,
+			ecc_uncorr_status.col, ecc_uncorr_status.row);
+
+	/* log the error 
*/
+	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+			ecc_uncorr_status.err_cnt,
+			err_addr >> PAGE_SHIFT, err_addr & ~PAGE_MASK, 0,
+			ecc_uncorr_status.rank, -1, -1, mci->ctl_name, "");
+
+	al_ddr_ecc_uncorr_int_clear(NULL, drvdata->vbase);
+}
+
+
+/*
+ * The following functions implement the sysfs behavior
+ */
+
+/* Data injection physical address configuration */
+static ssize_t al_inject_phys_addr_store(
+	struct device *dev,
+	struct device_attribute *mattr,
+	const char *data, size_t count)
+{
+	struct mem_ctl_info *mci = dev_get_drvdata(dev);
+	struct al_mc_drvdata *drvdata = mci->pvt_info;
+	unsigned long data_val;
+	al_phys_addr_t addr;
+	int rc;
+
+	rc = kstrtoul(data, 16, &data_val);
+	if (rc < 0)
+		return -EIO;
+
+	addr = (al_phys_addr_t)data_val;
+	rc = al_ddr_address_translate_sys2dram(drvdata->vbase, addr,
+			&drvdata->inject.rank, &drvdata->inject.bank,
+			&drvdata->inject.col, &drvdata->inject.row);
+	if (rc < 0)
+		return -EIO;
+
+	if (drvdata->inject_enabled == 1) {
+		rc = al_ddr_ecc_data_poison_enable(
+				drvdata->vbase,
+				drvdata->inject.rank,
+				drvdata->inject.bank,
+				drvdata->inject.col,
+				drvdata->inject.row);
+		if (rc < 0)
+			return -EIO;
+	}
+
+	return count;
+}
+
+static ssize_t al_inject_phys_addr_show(
+	struct device *dev,
+	struct device_attribute *mattr,
+	char *data)
+{
+	struct mem_ctl_info *mci = dev_get_drvdata(dev);
+	struct al_mc_drvdata *drvdata = mci->pvt_info;
+	al_phys_addr_t addr;
+
+	al_ddr_address_translate_dram2sys(drvdata->vbase, &addr,
+			drvdata->inject.rank, drvdata->inject.bank,
+			drvdata->inject.col, drvdata->inject.row);
+
+	return sprintf(data, "0x%llx\n", (unsigned long long)addr);
+}
+
+
+/* Data injection enable/disable interface */
+static ssize_t al_inject_enable_store(
+	struct device *dev,
+	struct device_attribute *mattr,
+	const char *data, size_t count)
+{
+	struct mem_ctl_info *mci = dev_get_drvdata(dev);
+	struct al_mc_drvdata *drvdata = mci->pvt_info;
+	unsigned long data_val;
+	int rc;
+
+	rc = kstrtoul(data, 10, &data_val);
+	if (rc < 0)
+		return -EIO;
+
+	if (data_val == 1)
+		rc = al_ddr_ecc_data_poison_enable(
+				drvdata->vbase,
+				drvdata->inject.rank,
+				drvdata->inject.bank,
+				drvdata->inject.col,
+				drvdata->inject.row);
+	else if (data_val == 0)
+		rc = al_ddr_ecc_data_poison_disable(drvdata->vbase);
+	else
+		return -EIO;
+
+	if (rc < 0)
+		return rc;
+
+	/* only record the new state once the hardware call succeeded */
+	drvdata->inject_enabled = data_val;
+
+	return count;
+}
+
+static ssize_t al_inject_enable_show(
+	struct device *dev,
+	struct device_attribute *mattr,
+	char *data)
+{
+	struct mem_ctl_info *mci = dev_get_drvdata(dev);
+	struct al_mc_drvdata *drvdata = mci->pvt_info;
+
+	return sprintf(data, "%d\n", drvdata->inject_enabled);
+}
+
+/* Data injection mechanism DDR-addressing configuration
+ * the store/show functions share the same implementation, defined below
+ */
+#define al_inject_store_impl(_field)					\
+	do {								\
+		struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
+		struct al_mc_drvdata *drvdata = mci->pvt_info;		\
+		unsigned long data_val;					\
+		int rc;							\
+									\
+		rc = kstrtoul(data, 10, &data_val);			\
+		if (rc < 0)						\
+			return -EIO;					\
+		drvdata->inject._field = data_val;			\
+		/* user must issue enable cmd after changing addr */	\
+		drvdata->inject_enabled = 0;				\
+		rc = al_ddr_ecc_data_poison_disable(drvdata->vbase);	\
+		if (rc < 0)						\
+			return -EIO;					\
+	} while (0)
+
+#define al_inject_show_impl(_field)					\
+	do {								\
+		struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
+		struct al_mc_drvdata *drvdata = mci->pvt_info;		\
+									\
+		return sprintf(data, "%d\n", drvdata->inject._field);	\
+	} 
while (0)
+
+static ssize_t al_inject_col_store(
+	struct device *dev,
+	struct device_attribute *mattr,
+	const char *data, size_t count)
+{
+	al_inject_store_impl(col);
+
+	return count;
+}
+
+static ssize_t al_inject_rank_store(
+	struct device *dev,
+	struct device_attribute *mattr,
+	const char *data, size_t count)
+{
+	al_inject_store_impl(rank);
+
+	return count;
+}
+
+static ssize_t al_inject_row_store(
+	struct device *dev,
+	struct device_attribute *mattr,
+	const char *data, size_t count)
+{
+	al_inject_store_impl(row);
+
+	return count;
+}
+
+static ssize_t al_inject_bank_store(
+	struct device *dev,
+	struct device_attribute *mattr,
+	const char *data, size_t count)
+{
+	al_inject_store_impl(bank);
+
+	return count;
+}
+
+static ssize_t al_inject_col_show(
+	struct device *dev,
+	struct device_attribute *mattr,
+	char *data)
+{
+	al_inject_show_impl(col);
+}
+
+static ssize_t al_inject_rank_show(
+	struct device *dev,
+	struct device_attribute *mattr,
+	char *data)
+{
+	al_inject_show_impl(rank);
+}
+
+static ssize_t al_inject_row_show(
+	struct device *dev,
+	struct device_attribute *mattr,
+	char *data)
+{
+	al_inject_show_impl(row);
+}
+
+static ssize_t al_inject_bank_show(
+	struct device *dev,
+	struct device_attribute *mattr,
+	char *data)
+{
+	al_inject_show_impl(bank);
+}
+
+/* show a short help for the sysfs attributes */
+static ssize_t al_inject_help_show(
+	struct device *dev,
+	struct device_attribute *mattr,
+	char *data)
+{
+	return sprintf(data,
+		"inject help\n"
+		"-----------\n"
+		"All of the following attributes use the sysfs interface for\n"
+		"setting/showing values:\n"
+		"echo VALUE > ATTRIBUTE - set ATTRIBUTE to VALUE\n"
+		"cat ATTRIBUTE - show the current value of ATTRIBUTE\n"
+		"\nAvailable commands:\n"
+		"- inject_phys_addr\n"
+		"\tset/show physical address for UC error injection\n"
+		"- inject_col/rank/row/bank\n"
+		"\tset/show ddr col/rank/row/bank value for UC error injection\n"
+		"- inject_enable\n"
+		"\tenable/disable the device (by setting to 1/0), or print\n"
+		"\tcurrent state\n"
+		"\t(*)when changing an address configuration, you need to\n"
+		"\t   re-enable the interface in order 
to apply your changes\n" + ); +} + +/* define and bind all of the store/show implementations with their + * corresponding sysfs attributes */ +static DEVICE_ATTR(inject_phys_addr, DEVICE_ATTR_FLAGS, + al_inject_phys_addr_show, al_inject_phys_addr_store); +static DEVICE_ATTR(inject_enable, DEVICE_ATTR_FLAGS, + al_inject_enable_show, al_inject_enable_store); +static DEVICE_ATTR(inject_col,DEVICE_ATTR_FLAGS, + al_inject_col_show, al_inject_col_store); +static DEVICE_ATTR(inject_rank,DEVICE_ATTR_FLAGS, + al_inject_rank_show, al_inject_rank_store); +static DEVICE_ATTR(inject_row,DEVICE_ATTR_FLAGS, + al_inject_row_show, al_inject_row_store); +static DEVICE_ATTR(inject_bank,DEVICE_ATTR_FLAGS, + al_inject_bank_show, al_inject_bank_store); +static DEVICE_ATTR(inject_help, S_IRUGO, + al_inject_help_show, NULL); + +static void al_delete_sysfs_devices(struct mem_ctl_info *mci) +{ + device_remove_file(&mci->dev, &dev_attr_inject_phys_addr); + device_remove_file(&mci->dev, &dev_attr_inject_enable); + device_remove_file(&mci->dev, &dev_attr_inject_col); + device_remove_file(&mci->dev, &dev_attr_inject_rank); + device_remove_file(&mci->dev, &dev_attr_inject_row); + device_remove_file(&mci->dev, &dev_attr_inject_bank); + device_remove_file(&mci->dev, &dev_attr_inject_help); +} + +static int al_create_sysfs_devices(struct mem_ctl_info *mci) +{ + int rc; + + rc = device_create_file(&mci->dev, &dev_attr_inject_phys_addr); + if (rc < 0) + goto err; + rc = device_create_file(&mci->dev, &dev_attr_inject_enable); + if (rc < 0) + goto err; + rc = device_create_file(&mci->dev, &dev_attr_inject_col); + if (rc < 0) + goto err; + rc = device_create_file(&mci->dev, &dev_attr_inject_rank); + if (rc < 0) + goto err; + rc = device_create_file(&mci->dev, &dev_attr_inject_row); + if (rc < 0) + goto err; + rc = device_create_file(&mci->dev, &dev_attr_inject_bank); + if (rc < 0) + goto err; + rc = device_create_file(&mci->dev, &dev_attr_inject_help); + if (rc < 0) + goto err; + + return rc; +err: + al_delete_sysfs_devices(mci); + return rc; +} + +/* + * end of sysfs section + */ + + +static int al_mc_probe(struct platform_device *pdev) +{ + struct edac_mc_layer layers[1]; + struct mem_ctl_info *mci = NULL; + struct al_mc_drvdata *drvdata; + struct dimm_info *dimm; + struct resource *r; + struct al_ddr_ecc_cfg ecc_cfg; + void __iomem *vbase; + int ecc_corr_irq, ecc_uncorr_irq; + unsigned int active_ranks, rank_addr_bits; + int i, res = 0; + + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + /* initialize the controller private database */ + /* set controller register base address */ + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "Unable to get mem resource\n"); + res = -ENODEV; + goto err; + } + + if (!devm_request_mem_region(&pdev->dev, r->start, + resource_size(r), dev_name(&pdev->dev))) { + dev_err(&pdev->dev, "Error while requesting mem region\n"); + res = -EBUSY; + goto err; + } + + vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r)); + if (!vbase) { + dev_err(&pdev->dev, "Unable to map regs\n"); + res = -ENOMEM; + goto err; + } + + al_ddr_ecc_cfg_get(vbase, &ecc_cfg); + if (!ecc_cfg.ecc_enabled) { + dev_err(&pdev->dev, "No ECC present, or ECC disabled\n"); + res = -ENODEV; + goto err; + } + + active_ranks = al_ddr_active_ranks_get(vbase); + if (!active_ranks) { + dev_err(&pdev->dev, "Failed to detect active ranks\n"); + res = -ENODEV; + goto err; + } + + layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; + layers[0].size = active_ranks; + 
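+	/* a single chip-select layer: each active rank is presented to EDAC
+	 * as its own virtual csrow, so error counts are kept per rank */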
layers[0].is_virt_csrow = true; + mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, + sizeof(struct al_mc_drvdata)); + if (!mci) + return -ENOMEM; + + mci->pdev = &pdev->dev; + drvdata = mci->pvt_info; + drvdata->mci = mci; + drvdata->vbase = vbase; + platform_set_drvdata(pdev, mci); + + /* set default address for inject mechanism */ + drvdata->inject.col = 0; + drvdata->inject.rank = 0; + drvdata->inject.row = 0; + drvdata->inject.bank = 0; + drvdata->inject_enabled = 0; + INIT_DELAYED_WORK(&drvdata->handle_corr_err_work, al_mc_corr_err_work); + INIT_DELAYED_WORK(&drvdata->handle_uncorr_err_work, al_mc_uncorr_err_work); + + ecc_corr_irq = + al_fabric_get_cause_irq(0, AL_FABRIC_IRQ_MCTL_ECC_CORR_ERR); + ecc_uncorr_irq = + al_fabric_get_cause_irq(0, AL_FABRIC_IRQ_MCTL_ECC_UNCORR_ERR); + + /* + * Configure the Memory Controller Info struct, according to the + * following: + * - Use DDR3 type memory + * - Single-bit Error Correction, Double-bit Error Detection (SECDED) + * - Scrub status is set according to the controller's configuration + */ + mci->mtype_cap = MEM_FLAG_DDR3; + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; + mci->edac_cap = EDAC_FLAG_SECDED; + mci->mod_name = dev_name(&pdev->dev); + mci->mod_ver = "1"; + mci->ctl_name = dev_name(&pdev->dev); + mci->scrub_mode = ecc_cfg.ecc_enabled ? + (ecc_cfg.scrub_enabled ? SCRUB_HW_SRC : SCRUB_NONE) : + SCRUB_UNKNOWN; + + rank_addr_bits = al_ddr_bits_per_rank_get(drvdata->vbase); + /* + * Set dimm attributes + * - Use DDR3 type memory + * - Single-bit Error Correction, Double-bit Error Detection (SECDED) + * - Number of pages can be calculated using rank size and page shift + * - Granularity of reported errors (in bytes) according to data width + */ + for (i = 0 ; i < active_ranks ; i++) { + dimm = mci->dimms[i]; + dimm->nr_pages = (1ULL << rank_addr_bits) >> PAGE_SHIFT; + dimm->grain = (al_ddr_data_width_get(drvdata->vbase) == + AL_DDR_DATA_WIDTH_64_BITS) ? 
8 : 4; + dimm->dtype = DEV_UNKNOWN; + dimm->mtype = MEM_DDR3; + dimm->edac_mode = EDAC_SECDED; + } + + res = edac_mc_add_mc(mci); + if (res < 0) + goto err; + + res = devm_request_irq(&pdev->dev, ecc_corr_irq, + al_mc_corr_err_handler, 0, dev_name(&pdev->dev), mci); + if (res < 0) { + dev_err(&pdev->dev, "IRQ request failed (ecc corr irq) %d\n", + ecc_corr_irq); + goto err; + } + + res = devm_request_irq(&pdev->dev, ecc_uncorr_irq, + al_mc_uncorr_err_handler, 0, dev_name(&pdev->dev), mci); + if (res < 0) { + dev_err(&pdev->dev, "IRQ request failed (ecc uncorr irq) %d\n", + ecc_uncorr_irq); + goto err; + } + + res = al_create_sysfs_devices(mci); + if (res < 0) + goto err; + + devres_close_group(&pdev->dev, NULL); + return 0; +err: + devres_release_group(&pdev->dev, NULL); + if (mci) + edac_mc_free(mci); + return res; +} + +static int al_mc_remove(struct platform_device *pdev) +{ + struct mem_ctl_info *mci = platform_get_drvdata(pdev); + + edac_mc_del_mc(&pdev->dev); + al_delete_sysfs_devices(mci); + edac_mc_free(mci); + return 0; +} + +static const struct of_device_id al_mc_of_match[] = { + { .compatible = "annapurna-labs,al-mc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, al_mc_of_match); + +static struct platform_driver al_mc_edac_driver = { + .probe = al_mc_probe, + .remove = al_mc_remove, + .driver = { + .name = "al_mc_edac", + .of_match_table = al_mc_of_match, + }, +}; + +module_platform_driver(al_mc_edac_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Annapurna Labs Inc."); +MODULE_DESCRIPTION("EDAC Driver for Annapurna Labs MC"); diff --git a/target/linux/alpine/files/drivers/mtd/nand/raw/al/Kconfig b/target/linux/alpine/files/drivers/mtd/nand/raw/al/Kconfig new file mode 100644 index 00000000000000..31522cb2a7d91e --- /dev/null +++ b/target/linux/alpine/files/drivers/mtd/nand/raw/al/Kconfig @@ -0,0 +1,5 @@ +config MTD_NAND_AL + tristate "NAND support for Annapurna Labs NAND controller" + select AL_DMA + help + Enable support for NAND Flash chips on Annapurna Labs NAND controllers. diff --git a/target/linux/alpine/files/drivers/mtd/nand/raw/al/Makefile b/target/linux/alpine/files/drivers/mtd/nand/raw/al/Makefile new file mode 100644 index 00000000000000..b7c8164e6f303d --- /dev/null +++ b/target/linux/alpine/files/drivers/mtd/nand/raw/al/Makefile @@ -0,0 +1,5 @@ +ccflags-y := -I$(srctree)/arch/arm/mach-alpine/include + +obj-$(CONFIG_MTD_NAND_AL) += al-nand.o +al-nand-objs += al_nand.o +al-nand-objs += al_hal_nand.o diff --git a/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand.c b/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand.c new file mode 100644 index 00000000000000..3257afad770f92 --- /dev/null +++ b/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand.c @@ -0,0 +1,1650 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include "al_hal_nand.h" +#include "al_hal_nand_regs.h" +#include "al_hal_nand_coded_properties.h" + +#define AL_NAND_DEVICE_CMD_PAGE_READ_FIRST 0x00 +#define AL_NAND_DEVICE_CMD_PAGE_READ_SECOND 0x30 +#define AL_NAND_DEVICE_CMD_CHG_READ_COL_FIRST 0x05 +#define AL_NAND_DEVICE_CMD_CHG_READ_COL_SECOND 0xE0 +#define AL_NAND_DEVICE_CMD_PAGE_PRG_FIRST 0x80 +#define AL_NAND_DEVICE_CMD_PAGE_PRG_SECOND 0x10 +#define AL_NAND_DEVICE_CMD_CHG_PRG_COL_FIRST 0x85 + +#define DATA_BUFF_OFFSET 0x000000 +#define CMD_BUFF_OFFSET 0x100000 +#define WRAP_BASE_OFFSET 0x200000 +#define CTRL_BASE_OFFSET 0x201000 + +static int _al_nand_dev_config_modes( + struct al_nand_ctrl_obj *obj); + +static void _al_nand_dev_config_ctl_reg_0( + struct al_nand_ctrl_obj *obj); + +static void _al_nand_dev_config_sdr_timing_params( + struct al_nand_ctrl_obj *obj); + +static void _al_nand_dev_config_rdy_busy_wait_cnt( + struct al_nand_ctrl_obj *obj); + +static void _al_nand_dev_config_bch_ctl_reg_0( + struct al_nand_ctrl_obj *obj); + +static void _al_nand_dev_config_bch_ctl_reg_1( + struct al_nand_ctrl_obj *obj); + +static void _al_nand_data_buf_wait_for_cw_vacancy( + struct al_nand_ctrl_obj *obj); + +static void _al_nand_cmd_seq_add_command( + uint32_t **cmd_seq_buff, + int *cmd_seq_buff_num_entries, + enum al_nand_command_type type, + uint8_t argument); + +static void _al_nand_cmd_seq_add_command_cyc( + uint32_t **cmd_seq_buff, + int *cmd_seq_buff_num_entries, + enum al_nand_command_type type, + int argument, + int num_cyc); + +static int _al_nand_get_bch_num_corr_bits( + struct al_nand_ctrl_obj *obj); + +static int _al_nand_get_bch_cw_parity_size( + struct al_nand_ctrl_obj *obj); + +static int _al_nand_get_cw_spare_cnt( + struct al_nand_ctrl_obj *obj); + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_init( + struct al_nand_ctrl_obj *obj, + void __iomem *nand_base, + struct al_ssm_dma *raid_dma, + uint32_t raid_dma_qid) +{ + int status = 0; + + uint32_t reg_val; + + al_assert(obj); + al_assert(nand_base); + + obj->regs_base = (nand_base + CTRL_BASE_OFFSET); + + obj->wrap_regs_base = (nand_base + WRAP_BASE_OFFSET); + obj->cmd_buff_base = (nand_base + CMD_BUFF_OFFSET); + obj->data_buff_base = (nand_base + DATA_BUFF_OFFSET); + + obj->raid_dma = raid_dma; + obj->raid_dma_qid = raid_dma_qid; + 
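+	/* no device selected yet; al_nand_dev_select() must be called
+	 * before any device access */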
+ obj->current_dev_index = -1; + +#if (AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_VAL_NUM_BNKS != 6) + #error Assumption wrong! +#endif + + reg_val = + AL_REG_BITS_FIELD( + AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_SHIFT(0), + AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_VAL_NAND) | + AL_REG_BITS_FIELD( + AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_SHIFT(1), + AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_VAL_NAND) | + AL_REG_BITS_FIELD( + AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_SHIFT(2), + AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_VAL_NAND) | + AL_REG_BITS_FIELD( + AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_SHIFT(3), + AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_VAL_NAND) | + AL_REG_BITS_FIELD( + AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_SHIFT(4), + AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_VAL_NAND) | + AL_REG_BITS_FIELD( + AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_SHIFT(5), + AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_VAL_NAND); + + al_reg_write32(&obj->regs_base->flash_ctl_3, reg_val); + + return status; +} + +/******************************************************************************/ +/******************************************************************************/ +void al_nand_terminate( + struct al_nand_ctrl_obj *obj) +{ + al_assert(obj); +} + +/******************************************************************************/ +/******************************************************************************/ +void al_nand_reset( + struct al_nand_ctrl_obj *obj, + int reset_mask) +{ + al_assert(obj); + + al_reg_write32(&obj->regs_base->reset_reg, reset_mask); + + if (reset_mask & AL_NAND_RESET_MASK_TIMING_ENGINE) { + uint32_t reg_val; + + do { + reg_val = al_reg_read32( + &obj->regs_base->reset_status_reg); + } while (0 == AL_REG_BIT_GET( + reg_val, + AL_NAND_RESET_STATUS_REG_TIM_ENG_RST_DN)); + } +} + +/******************************************************************************/ +/******************************************************************************/ +void al_nand_dev_select( + struct al_nand_ctrl_obj *obj, + int dev_index) +{ + uint32_t reg_val; + + al_assert(obj); + al_assert(dev_index >= 0); + al_assert(dev_index < 8); + + reg_val = al_reg_read32(&obj->regs_base->ctl_reg0); + + AL_REG_FIELD_SET( + reg_val, + AL_NAND_CTL_REG0_CS1_MASK, + AL_NAND_CTL_REG0_CS1_SHIFT, + dev_index); + + AL_REG_FIELD_SET( + reg_val, + AL_NAND_CTL_REG0_CS2_MASK, + AL_NAND_CTL_REG0_CS2_SHIFT, + dev_index); + + al_reg_write32(&obj->regs_base->ctl_reg0, reg_val); + + obj->current_dev_index = dev_index; +} + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_dev_config_basic( + struct al_nand_ctrl_obj *obj) +{ + int status = 0; + + struct al_nand_dev_properties dev_properties; + struct al_nand_ecc_config ecc_config; + + al_assert(obj); + + al_memset(&dev_properties, 0, sizeof(struct al_nand_dev_properties)); + dev_properties.timingMode = AL_NAND_DEVICE_TIMING_MODE_ONFI_0; + + al_memset(&ecc_config, 0, sizeof(struct al_nand_ecc_config)); + + if (0 != al_nand_dev_config( + obj, + &dev_properties, + &ecc_config)) { + al_err("al_nand_dev_config failed!\n"); + status = -EIO; + goto done; + } + +done: + return status; +} + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_dev_config( + struct al_nand_ctrl_obj *obj, + struct al_nand_dev_properties *dev_properties, + struct al_nand_ecc_config *ecc_config) +{ + int status = 0; + + al_assert(obj); + 
al_assert(dev_properties); + al_assert(ecc_config); + + obj->dev_properties = *dev_properties; + obj->ecc_config = *ecc_config; + + if (0 != _al_nand_dev_config_modes(obj)) { + al_err("_al_nand_dev_config_modes failed!\n"); + status = -EIO; + goto done; + } + + _al_nand_dev_config_ctl_reg_0(obj); + + _al_nand_dev_config_sdr_timing_params(obj); + + _al_nand_dev_config_rdy_busy_wait_cnt(obj); + + _al_nand_dev_config_bch_ctl_reg_0(obj); + + _al_nand_dev_config_bch_ctl_reg_1(obj); + +done: + + return status; +} +/******************************************************************************/ +/******************************************************************************/ +#if NAND_CODED_PROPERTIES_NUM_WORDS != 4 +#error NAND_CODED_PROPERTIES_NUM_WORDS != 4 +#endif + +int al_nand_properties_decode( + void __iomem *pbs_regs_base, + struct al_nand_dev_properties *dev_properties, + struct al_nand_ecc_config *ecc_config, + struct al_nand_extra_dev_properties *dev_ext_props) +{ + int fieldVal; + uint32_t nand_properties[NAND_CODED_PROPERTIES_NUM_WORDS]; + struct al_pbs_regs *pbs_regs = pbs_regs_base; + + nand_properties[0] = al_reg_read32(&pbs_regs->unit.cfg_nand_0); + nand_properties[1] = al_reg_read32(&pbs_regs->unit.cfg_nand_1); + nand_properties[2] = al_reg_read32(&pbs_regs->unit.cfg_nand_2); + + if (0 == AL_REG_FIELD_GET(nand_properties[0], + NAND_CODED_PROPERTIES_WORD_0_IS_VALID_MASK, + NAND_CODED_PROPERTIES_WORD_0_IS_VALID_SHIFT)) { + al_err("nand properties is not valid\n"); + return -EINVAL; + } + + al_memset(dev_properties, 0, sizeof(struct al_nand_dev_properties)); + al_memset(ecc_config, 0, sizeof(struct al_nand_ecc_config)); + al_memset(dev_ext_props, 0, + sizeof(struct al_nand_extra_dev_properties)); + + fieldVal = AL_REG_FIELD_GET(nand_properties[0], + NAND_CODED_PROPERTIES_WORD_0_PAGE_SIZE_MASK, + NAND_CODED_PROPERTIES_WORD_0_PAGE_SIZE_SHIFT); + + dev_ext_props->pageSize = 512 << fieldVal; + + if (0 == fieldVal) + dev_properties->pageSize = AL_NAND_DEVICE_PAGE_SIZE_512; + else + dev_properties->pageSize = fieldVal - 2; + + dev_ext_props->blockSize = + dev_ext_props->pageSize * 16 * + (1 << AL_REG_FIELD_GET(nand_properties[0], + NAND_CODED_PROPERTIES_WORD_0_BLOCK_SIZE_MASK, + NAND_CODED_PROPERTIES_WORD_0_BLOCK_SIZE_SHIFT)); + + dev_properties->sdrDataWidth = + AL_REG_FIELD_GET(nand_properties[0], + NAND_CODED_PROPERTIES_WORD_0_BUS_WIDTH_MASK, + NAND_CODED_PROPERTIES_WORD_0_BUS_WIDTH_SHIFT); + + dev_ext_props->wordSize = (1 << dev_properties->sdrDataWidth); + + dev_properties->num_col_cyc = + 1 + AL_REG_FIELD_GET(nand_properties[0], + NAND_CODED_PROPERTIES_WORD_0_COL_CYCLES_MASK, + NAND_CODED_PROPERTIES_WORD_0_COL_CYCLES_SHIFT); + + dev_properties->num_row_cyc = + 1 + AL_REG_FIELD_GET(nand_properties[0], + NAND_CODED_PROPERTIES_WORD_0_ROW_CYCLES_MASK, + NAND_CODED_PROPERTIES_WORD_0_ROW_CYCLES_SHIFT); + + dev_ext_props->badBlockMarking.method = + AL_REG_FIELD_GET(nand_properties[0], + NAND_CODED_PROPERTIES_WORD_0_BBM_METHOD_MASK, + NAND_CODED_PROPERTIES_WORD_0_BBM_METHOD_SHIFT); + + dev_ext_props->badBlockMarking.location1 = + AL_REG_FIELD_GET(nand_properties[0], + NAND_CODED_PROPERTIES_WORD_0_BBM_LOC1_MASK, + NAND_CODED_PROPERTIES_WORD_0_BBM_LOC1_SHIFT); + + dev_ext_props->badBlockMarking.location2 = + AL_REG_FIELD_GET(nand_properties[0], + NAND_CODED_PROPERTIES_WORD_0_BBM_LOC2_MASK, + NAND_CODED_PROPERTIES_WORD_0_BBM_LOC2_SHIFT); + + dev_properties->timingMode = AL_REG_FIELD_GET(nand_properties[0], + NAND_CODED_PROPERTIES_WORD_0_TIMING_SET_MASK, + 
NAND_CODED_PROPERTIES_WORD_0_TIMING_SET_SHIFT); + + fieldVal = AL_REG_FIELD_GET(nand_properties[0], + NAND_CODED_PROPERTIES_WORD_0_ECC_ALG_MASK, + NAND_CODED_PROPERTIES_WORD_0_ECC_ALG_SHIFT); + + dev_ext_props->eccIsEnabled = (0 != fieldVal) ? 1 : 0; + + if (dev_ext_props->eccIsEnabled) + ecc_config->algorithm = fieldVal - 1; + + ecc_config->num_corr_bits = + AL_REG_FIELD_GET(nand_properties[1], + NAND_CODED_PROPERTIES_WORD_1_ECC_BCH_STRENGTH_MASK, + NAND_CODED_PROPERTIES_WORD_1_ECC_BCH_STRENGTH_SHIFT); + + ecc_config->messageSize = + AL_REG_FIELD_GET(nand_properties[1], + NAND_CODED_PROPERTIES_WORD_1_ECC_BCH_CODEWORD_MASK, + NAND_CODED_PROPERTIES_WORD_1_ECC_BCH_CODEWORD_SHIFT); + + ecc_config->spareAreaOffset = + dev_ext_props->pageSize + + AL_REG_FIELD_GET(nand_properties[1], + NAND_CODED_PROPERTIES_WORD_1_ECC_LOC_MASK, + NAND_CODED_PROPERTIES_WORD_1_ECC_LOC_SHIFT); + + dev_properties->timing.tSETUP = + AL_REG_FIELD_GET(nand_properties[1], + NAND_CODED_PROPERTIES_WORD_1_TIMING_TSETUP_MASK, + NAND_CODED_PROPERTIES_WORD_1_TIMING_TSETUP_SHIFT); + + dev_properties->timing.tHOLD = + AL_REG_FIELD_GET(nand_properties[1], + NAND_CODED_PROPERTIES_WORD_1_TIMING_THOLD_MASK, + NAND_CODED_PROPERTIES_WORD_1_TIMING_THOLD_SHIFT); + + dev_properties->timing.tWH = + AL_REG_FIELD_GET(nand_properties[1], + NAND_CODED_PROPERTIES_WORD_1_TIMING_TWH_MASK, + NAND_CODED_PROPERTIES_WORD_1_TIMING_TWH_SHIFT); + + dev_properties->timing.tWRP = + AL_REG_FIELD_GET(nand_properties[2], + NAND_CODED_PROPERTIES_WORD_2_TIMING_TWRP_MASK, + NAND_CODED_PROPERTIES_WORD_2_TIMING_TWRP_SHIFT); + + dev_properties->timing.tINTCMD = + AL_REG_FIELD_GET(nand_properties[2], + NAND_CODED_PROPERTIES_WORD_2_TIMING_TINTCMD_MASK, + NAND_CODED_PROPERTIES_WORD_2_TIMING_TINTCMD_SHIFT); + + dev_properties->timing.tRR = + AL_REG_FIELD_GET(nand_properties[2], + NAND_CODED_PROPERTIES_WORD_2_TIMING_TREADYRE_MASK, + NAND_CODED_PROPERTIES_WORD_2_TIMING_TREADYRE_SHIFT); + + dev_properties->timing.tWB = + AL_REG_FIELD_GET(nand_properties[2], + NAND_CODED_PROPERTIES_WORD_2_TIMING_TWB_MASK, + NAND_CODED_PROPERTIES_WORD_2_TIMING_TWB_SHIFT); + + dev_properties->timing.readDelay = + AL_REG_FIELD_GET(nand_properties[2], + NAND_CODED_PROPERTIES_WORD_2_TIMING_TREADDLY_MASK, + NAND_CODED_PROPERTIES_WORD_2_TIMING_TREADDLY_SHIFT); + + return 0; +} + + +/******************************************************************************/ +/******************************************************************************/ +void al_nand_cw_config( + struct al_nand_ctrl_obj *obj, + uint32_t cw_size, + uint32_t cw_count) +{ + uint32_t reg_val; + + al_assert(obj); + + reg_val = 0; + + AL_REG_FIELD_SET( + reg_val, + AL_NAND_CODEWORD_SIZE_CNT_REG_SIZE_MASK, + AL_NAND_CODEWORD_SIZE_CNT_REG_SIZE_SHIFT, + cw_size); + + AL_REG_FIELD_SET( + reg_val, + AL_NAND_CODEWORD_SIZE_CNT_REG_COUNT_MASK, + AL_NAND_CODEWORD_SIZE_CNT_REG_COUNT_SHIFT, + cw_count); + + al_reg_write32(&obj->regs_base->codeword_size_cnt_reg, reg_val); + + al_reg_write32(&obj->wrap_regs_base->code_word_size, cw_size); + + obj->cw_size = cw_size; + obj->cw_count_remaining = obj->cw_count = cw_count; + obj->cw_size_remaining = 0; +} + +/******************************************************************************/ +/******************************************************************************/ +void al_nand_ecc_set_enabled( + struct al_nand_ctrl_obj *obj, + int enabled) +{ + uint32_t reg_val; + + al_assert(obj); + + reg_val = al_reg_read32(&obj->regs_base->bch_ctrl_reg_0); + + AL_REG_BIT_VAL_SET( + reg_val, + 
AL_NAND_BCH_CTRL_REG_0_ECC_ON_OFF, + enabled); + + al_reg_write32(&obj->regs_base->bch_ctrl_reg_0, reg_val); +} + +/******************************************************************************/ +/******************************************************************************/ +void al_nand_wp_set_enable( + struct al_nand_ctrl_obj *obj, + int enable) +{ + uint32_t reg_val; + + al_assert(obj); + + reg_val = al_reg_read32(&obj->regs_base->ctl_reg0); + + AL_REG_BIT_VAL_SET(reg_val, AL_NAND_CTL_REG0_WP, enable ? 0 : 1); + + al_reg_write32(&obj->regs_base->ctl_reg0, reg_val); +} + +void al_nand_tx_set_enable( + struct al_nand_ctrl_obj *obj, + int enable) +{ + uint32_t reg_val; + + al_assert(obj); + + reg_val = al_reg_read32(&obj->regs_base->ctl_reg0); + + AL_REG_BIT_VAL_SET(reg_val, AL_NAND_CTL_REG0_TX_MODE, enable); + + al_reg_write32(&obj->regs_base->ctl_reg0, reg_val); +} + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_uncorr_err_get( + struct al_nand_ctrl_obj *obj) +{ + int reg_val; + + al_assert(obj); + + reg_val = al_reg_read32(&obj->regs_base->nfc_int_stat); + + return AL_REG_BIT_GET(reg_val, AL_NAND_NFC_INT_STAT_UNCORR_ERR); +} + +/******************************************************************************/ +/******************************************************************************/ +void al_nand_uncorr_err_clear( + struct al_nand_ctrl_obj *obj) +{ + int reg_val = 0; + + al_assert(obj); + + AL_REG_BIT_SET(reg_val, AL_NAND_NFC_INT_STAT_UNCORR_ERR); + + al_reg_write32(&obj->regs_base->nfc_int_stat, reg_val); +} + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_corr_err_get( + struct al_nand_ctrl_obj *obj) +{ + int reg_val; + + al_assert(obj); + + reg_val = al_reg_read32(&obj->regs_base->nfc_int_stat); + + return AL_REG_BIT_GET(reg_val, AL_NAND_NFC_INT_STAT_CORR_ERR); +} + +/******************************************************************************/ +/******************************************************************************/ +void al_nand_corr_err_clear( + struct al_nand_ctrl_obj *obj) +{ + int reg_val = 0; + + al_assert(obj); + + AL_REG_BIT_VAL_SET(reg_val, AL_NAND_NFC_INT_STAT_CORR_ERR, 1); + + al_reg_write32(&obj->regs_base->nfc_int_stat, reg_val); +} + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_dev_is_ready( + struct al_nand_ctrl_obj *obj) +{ + int is_ready; + uint32_t reg_val; + + al_assert(obj); + + reg_val = al_reg_read32(&obj->regs_base->rdy_busy_status_reg); + + if (0 == (reg_val & (1 << obj->current_dev_index))) + is_ready = 0; + else + is_ready = 1; + + return is_ready; +} + +/******************************************************************************/ +/******************************************************************************/ +static void _al_nand_cmd_seq_size_page_read( + struct al_nand_ctrl_obj *obj, + int num_bytes, + int ecc_enabled, + int *cmd_seq_buff_num_entries) +{ + int code_word_size; + int code_word_count; + + int num_required_entries; + + al_assert(obj); + al_assert(cmd_seq_buff_num_entries); + + code_word_size = (512 << obj->ecc_config.messageSize); + + num_bytes = AL_ALIGN_UP(num_bytes, 4); + + if (num_bytes < code_word_size) + code_word_size = 
num_bytes; + + if ((0 == ecc_enabled) && (0 != (num_bytes % code_word_size))) + code_word_size = num_bytes / 4; + + al_assert(0 == (num_bytes % code_word_size)); + + code_word_count = num_bytes / code_word_size; + + if (ecc_enabled) { + num_required_entries = + 1 + /* first read command */ + obj->dev_properties.num_col_cyc + /* spare off */ + obj->dev_properties.num_row_cyc + /* row address */ + 1 + /* second read command */ + 1 + /* wait for ready */ + 2 + /* spare count read */ + 1 + /* change read column address first command */ + obj->dev_properties.num_col_cyc + /* column address*/ + 1 + /* change read column address second command */ + 1 + /* wait cycle count */ + 2 * code_word_count; /* codeword size */ + } else { + num_required_entries = + 1 + /* first read command */ + obj->dev_properties.num_col_cyc + /* column address*/ + obj->dev_properties.num_row_cyc + /* row address */ + 1 + /* second read command */ + 1 + /* wait for ready */ + 2 * code_word_count; /* codeword size */ + } + + *cmd_seq_buff_num_entries = num_required_entries; +} + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_cmd_seq_gen_page_read( + struct al_nand_ctrl_obj *obj, + int column, + int row, + int num_bytes, + int ecc_enabled, + uint32_t *cmd_seq_buff, + int *cmd_seq_buff_num_entries, + uint32_t *cw_size, + uint32_t *cw_count) +{ + int status = 0; + + int code_word_size; + int code_word_count; + + int num_required_entries; + + al_assert(obj); + al_assert(cmd_seq_buff); + al_assert(cmd_seq_buff_num_entries); + + code_word_size = (512 << obj->ecc_config.messageSize); + + num_bytes = AL_ALIGN_UP(num_bytes, 4); + + if (num_bytes < code_word_size) + code_word_size = num_bytes; + + if ((0 == ecc_enabled) && (0 != (num_bytes % code_word_size))) + code_word_size = num_bytes / 4; + + al_assert(0 == (num_bytes % code_word_size)); + + code_word_count = num_bytes / code_word_size; + + *cw_size = code_word_size; + *cw_count = code_word_count; + + _al_nand_cmd_seq_size_page_read( + obj, + num_bytes, + ecc_enabled, + &num_required_entries); + + if (*cmd_seq_buff_num_entries < num_required_entries) { + al_err( + "not enough entries provided - required %d!\n", + num_required_entries); + + status = -EIO; + goto done; + } + + *cmd_seq_buff_num_entries = 0; + + if (ecc_enabled) { + int spare_count = + code_word_count * _al_nand_get_cw_spare_cnt(obj); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_CMD, + AL_NAND_DEVICE_CMD_PAGE_READ_FIRST); + + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_ADDRESS, + obj->ecc_config.spareAreaOffset / + (obj->dev_properties.sdrDataWidth + 1), + obj->dev_properties.num_col_cyc); + + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_ADDRESS, + row, + obj->dev_properties.num_row_cyc); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_CMD, + AL_NAND_DEVICE_CMD_PAGE_READ_SECOND); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_WAIT_FOR_READY, + 0); + + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_SPARE_READ_COUNT, + spare_count, + 2); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_CMD, + 
AL_NAND_DEVICE_CMD_CHG_READ_COL_FIRST); + + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_ADDRESS, + column / (obj->dev_properties.sdrDataWidth + 1), + obj->dev_properties.num_col_cyc); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_CMD, + AL_NAND_DEVICE_CMD_CHG_READ_COL_SECOND); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_WAIT_CYCLE_COUNT, + 0); + + while (num_bytes > 0) { + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_DATA_READ_COUNT, + code_word_size, + 2); + + num_bytes -= code_word_size; + } + } else { + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_CMD, + AL_NAND_DEVICE_CMD_PAGE_READ_FIRST); + + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_ADDRESS, + column / (obj->dev_properties.sdrDataWidth + 1), + obj->dev_properties.num_col_cyc); + + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_ADDRESS, + row, + obj->dev_properties.num_row_cyc); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_CMD, + AL_NAND_DEVICE_CMD_PAGE_READ_SECOND); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_WAIT_FOR_READY, + 0); + + while (num_bytes > 0) { + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_DATA_READ_COUNT, + code_word_size, + 2); + + num_bytes -= code_word_size; + } + } + + al_assert(*cmd_seq_buff_num_entries == num_required_entries); + +done: + + return status; +} + +/******************************************************************************/ +/******************************************************************************/ +void al_nand_cmd_seq_size_page_write( + struct al_nand_ctrl_obj *obj, + int num_bytes, + int ecc_enabled, + int *cmd_seq_buff_num_entries) +{ + int code_word_size; + int code_word_count; + + int num_required_entries; + + al_assert(obj); + al_assert(cmd_seq_buff_num_entries); + + code_word_size = (512 << obj->ecc_config.messageSize); + + if (num_bytes < code_word_size) + code_word_size = num_bytes; + + if ((0 == ecc_enabled) && (0 != (num_bytes % code_word_size))) + code_word_size = num_bytes / 4; + + al_assert(0 == (num_bytes % code_word_size)); + + code_word_count = num_bytes / code_word_size; + + if (ecc_enabled) { + num_required_entries = + 1 + /* first program command */ + obj->dev_properties.num_col_cyc + /* column address*/ + obj->dev_properties.num_row_cyc + /* row address */ + 1 + /* NOP */ + 1 + /* wait for cycle count */ + 2 * code_word_count + /* codeword size */ + 1 + /* change program column address first command */ + obj->dev_properties.num_col_cyc + /* spare off */ + 1 + /* wait for cycle count */ + 2 + /* spare count write */ + 1 + /* second program command */ + 1; /* wait for ready */ + } else { + num_required_entries = + 1 + /* first program command */ + obj->dev_properties.num_col_cyc + /* column address*/ + obj->dev_properties.num_row_cyc + /* row address */ + 1 + /* NOP */ + 1 + /* wait for cycle count */ + 2 * code_word_count + /* codeword size */ + 1 + /* second program command */ + 1; /* wait for ready */ + } + + *cmd_seq_buff_num_entries = num_required_entries; +} + 
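+/*
+ * Worked example (illustrative values, not taken from the driver): for a
+ * device using 2 column cycles and 3 row cycles, a 2048-byte ECC-enabled
+ * write with 512-byte code words (messageSize == 0, so code_word_size =
+ * 512 << 0) splits into 4 code words, and the accounting above yields
+ * 1 + 2 + 3 + 1 + 1 + (2 * 4) + 1 + 2 + 1 + 2 + 1 + 1 = 24 entries -
+ * the buffer size a caller must provide to al_nand_cmd_seq_gen_page_write
+ * below.
+ */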
+/******************************************************************************/ +/******************************************************************************/ +int al_nand_cmd_seq_gen_page_write( + struct al_nand_ctrl_obj *obj, + int column, + int row, + int num_bytes, + int ecc_enabled, + uint32_t *cmd_seq_buff, + int *cmd_seq_buff_num_entries, + uint32_t *cw_size, + uint32_t *cw_count) +{ + int status = 0; + + int code_word_size; + int code_word_count; + + int num_required_entries; + + al_assert(obj); + al_assert(cmd_seq_buff); + al_assert(cmd_seq_buff_num_entries); + + code_word_size = (512 << obj->ecc_config.messageSize); + + num_bytes = AL_ALIGN_UP(num_bytes, 4); + + if (num_bytes < code_word_size) + code_word_size = num_bytes; + + if ((0 == ecc_enabled) && (0 != (num_bytes % code_word_size))) + code_word_size = num_bytes / 4; + + al_assert(0 == (num_bytes % code_word_size)); + + code_word_count = num_bytes / code_word_size; + + *cw_size = code_word_size; + *cw_count = code_word_count; + + al_nand_cmd_seq_size_page_write( + obj, + num_bytes, + ecc_enabled, + &num_required_entries); + + if (*cmd_seq_buff_num_entries < num_required_entries) { + al_err( + "not enough entries provided - required %d!\n", + num_required_entries); + + status = -EIO; + goto done; + } + + *cmd_seq_buff_num_entries = 0; + + if (ecc_enabled) { + int spare_count = + code_word_count * _al_nand_get_cw_spare_cnt(obj); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_CMD, + AL_NAND_DEVICE_CMD_PAGE_PRG_FIRST); + + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_ADDRESS, + column / (obj->dev_properties.sdrDataWidth + 1), + obj->dev_properties.num_col_cyc); + + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_ADDRESS, + row, + obj->dev_properties.num_row_cyc); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_NOP, + 0); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_WAIT_CYCLE_COUNT, + 0); + + while (num_bytes) { + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_DATA_WRITE_COUNT, + code_word_size, + 2); + + num_bytes -= code_word_size; + } + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_CMD, + AL_NAND_DEVICE_CMD_CHG_PRG_COL_FIRST); + + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_ADDRESS, + obj->ecc_config.spareAreaOffset / + (obj->dev_properties.sdrDataWidth + 1), + obj->dev_properties.num_col_cyc); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_WAIT_CYCLE_COUNT, + 0); + + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_SPARE_WRITE_COUNT, + spare_count, + 2); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_CMD, + AL_NAND_DEVICE_CMD_PAGE_PRG_SECOND); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_WAIT_FOR_READY, + 0); + } else { + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_CMD, + AL_NAND_DEVICE_CMD_PAGE_PRG_FIRST); + + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + 
AL_NAND_COMMAND_TYPE_ADDRESS, + column / (obj->dev_properties.sdrDataWidth + 1), + obj->dev_properties.num_col_cyc); + + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_ADDRESS, + row, + obj->dev_properties.num_row_cyc); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_NOP, + 0); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_WAIT_CYCLE_COUNT, + 0); + + while (num_bytes) { + _al_nand_cmd_seq_add_command_cyc( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_DATA_WRITE_COUNT, + code_word_size, + 2); + + num_bytes -= code_word_size; + } + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_CMD, + AL_NAND_DEVICE_CMD_PAGE_PRG_SECOND); + + _al_nand_cmd_seq_add_command( + &cmd_seq_buff, + cmd_seq_buff_num_entries, + AL_NAND_COMMAND_TYPE_WAIT_FOR_READY, + 0); + } + + al_assert(*cmd_seq_buff_num_entries == num_required_entries); + +done: + + return status; +} + +/******************************************************************************/ +/******************************************************************************/ +void al_nand_cmd_single_execute( + struct al_nand_ctrl_obj *obj, + uint32_t cmd) +{ + al_assert(obj); + + al_reg_write32( + obj->cmd_buff_base, + cmd); +} + +/******************************************************************************/ +/******************************************************************************/ +void al_nand_cmd_seq_execute( + struct al_nand_ctrl_obj *obj, + uint32_t *cmd_seq_buff, + int cmd_seq_buff_num_entries) +{ + al_assert(obj); + al_assert(cmd_seq_buff); + + while (cmd_seq_buff_num_entries > 0) { + al_reg_write32( + obj->cmd_buff_base, + *cmd_seq_buff); + + cmd_seq_buff++; + cmd_seq_buff_num_entries--; + } +} + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_cmd_buff_is_empty( + struct al_nand_ctrl_obj *obj) +{ + uint32_t reg_val; + int empty; + + al_assert(obj); + + reg_val = 0; + + AL_REG_BIT_VAL_SET( + reg_val, + AL_NAND_NFC_INT_STAT_CMD_BUF_EMPTY, + 1); + + al_reg_write32(&obj->regs_base->nfc_int_stat, reg_val); + + reg_val = al_reg_read32(&obj->regs_base->nfc_int_stat); + + empty = AL_REG_BIT_GET(reg_val, AL_NAND_NFC_INT_STAT_CMD_BUF_EMPTY); + + return empty; +} + +/******************************************************************************/ +/******************************************************************************/ +void __iomem *al_nand_data_buff_base_get( + struct al_nand_ctrl_obj *obj) +{ + al_assert(obj); + + return obj->data_buff_base; +} + +int al_nand_data_buff_read( + struct al_nand_ctrl_obj *obj, + int num_bytes, + int num_bytes_skip_head, + int num_bytes_skip_tail, + uint8_t *buff) +{ + int status = 0; + + uint32_t cw_size; + uint32_t cw_size_remaining; + uint32_t cw_count_remaining; + + al_assert(obj); + al_assert(buff); + + cw_size = obj->cw_size; + cw_size_remaining = obj->cw_size_remaining; + cw_count_remaining = obj->cw_count_remaining; + + if (((cw_count_remaining * cw_size) + cw_size_remaining) < + (uint32_t)num_bytes) { + al_err( + "%s: Not enough CW data (%u, %u)!\n", + __func__, + cw_size_remaining, + cw_count_remaining); + + status = -EIO; + goto done; + } + + while (num_bytes > 0) { + uint32_t word; + + uint8_t byte; + + int word_bytes; + + if (0 == 
cw_size_remaining) { + cw_size_remaining = cw_size; + cw_count_remaining--; + } + + word = al_reg_read32( + obj->data_buff_base); + + for ( + word_bytes = 0; + (word_bytes < (int)sizeof(word)) && (num_bytes > 0); + word_bytes++, num_bytes--, word >>= 8) { + byte = word & 0xFF; + + if ( + (0 == num_bytes_skip_head) && + (num_bytes > num_bytes_skip_tail)) { + *buff = byte; + buff++; + } + + if (num_bytes_skip_head > 0) + num_bytes_skip_head--; + } + + cw_size_remaining -= 4; + } + + obj->cw_size_remaining = cw_size_remaining; + obj->cw_count_remaining = cw_count_remaining; + +done: + + return status; +} + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_data_buff_write( + struct al_nand_ctrl_obj *obj, + int num_bytes, + const uint8_t *buff) +{ + int status = 0; + + uint32_t cw_size; + uint32_t cw_size_remaining; + uint32_t cw_count_remaining; + + al_assert(obj); + al_assert(buff); + + cw_size = obj->cw_size; + cw_size_remaining = obj->cw_size_remaining; + cw_count_remaining = obj->cw_count_remaining; + + if (((cw_count_remaining * cw_size) + cw_size_remaining) < + (uint32_t)num_bytes) { + al_err( + "%s: Not enough CW data (%u, %u)!\n", + __func__, + cw_size_remaining, + cw_count_remaining); + + status = -EIO; + goto done; + } + + while (num_bytes > 0) { + uint32_t word; + int word_bytes; + + if (0 == cw_size_remaining) { + _al_nand_data_buf_wait_for_cw_vacancy(obj); + cw_size_remaining = cw_size; + cw_count_remaining--; + } + + for ( + word_bytes = 0, word = 0; + (word_bytes < (int)sizeof(word)) && (num_bytes > 0); + word_bytes++, num_bytes--, buff++) { + word >>= 8; + + word |= (uint32_t)(*buff) << 24; + } + + for ( + ; + word_bytes < (int)sizeof(word); + word_bytes++) { + word >>= 8; + } + + al_reg_write32( + &obj->regs_base->data_buffer_reg, + word); + + cw_size_remaining -= 4; + } + +done: + + return status; +} + +/******************************************************************************/ +/******************************************************************************/ +static int _al_nand_dev_config_modes( + struct al_nand_ctrl_obj *obj) +{ + int status = 0; + + uint32_t reg_val; + + struct al_nand_dev_properties *dev_properties = + &obj->dev_properties; + + al_assert(obj); + + reg_val = 0; + + AL_REG_FIELD_SET( + reg_val, + AL_NAND_MODE_SELECT_MODE_SELECT_MASK, + AL_NAND_MODE_SELECT_MODE_SELECT_SHIFT, + AL_NAND_MODE_SELECT_SDR); + + AL_REG_FIELD_SET( + reg_val, + AL_NAND_MODE_SELECT_SDR_TIM_MODE_MASK, + AL_NAND_MODE_SELECT_SDR_TIM_MODE_SHIFT, + dev_properties->timingMode); + + al_reg_write32(&obj->regs_base->mode_select_reg, reg_val); + + return status; +} + +static void _al_nand_dev_config_ctl_reg_0( + struct al_nand_ctrl_obj *obj) +{ + uint32_t reg_val; + + struct al_nand_dev_properties *dev_properties = + &obj->dev_properties; + + al_assert(obj); + + reg_val = al_reg_read32(&obj->regs_base->ctl_reg0); + + AL_REG_BIT_VAL_SET( + reg_val, + AL_NAND_CTL_REG0_DQ_WIDTH, + dev_properties->sdrDataWidth); + + AL_REG_FIELD_SET( + reg_val, + AL_NAND_CTL_REG0_COL_ADDR_CYCLES_MASK, + AL_NAND_CTL_REG0_COL_ADDR_CYCLES_SHIFT, + dev_properties->num_col_cyc); + + AL_REG_FIELD_SET( + reg_val, + AL_NAND_CTL_REG0_ROW_ADDR_CYCLES_MASK, + AL_NAND_CTL_REG0_ROW_ADDR_CYCLES_SHIFT, + dev_properties->num_row_cyc); + + AL_REG_FIELD_SET( + reg_val, + AL_NAND_CTL_REG0_PAGE_SIZE_MASK, + AL_NAND_CTL_REG0_PAGE_SIZE_SHIFT, + dev_properties->pageSize); + + 
al_reg_write32(&obj->regs_base->ctl_reg0, reg_val); +} + +static void _al_nand_dev_config_sdr_timing_params( + struct al_nand_ctrl_obj *obj) +{ + uint32_t reg_val; + + struct al_nand_dev_properties *dev_properties = + &obj->dev_properties; + + al_assert(obj); + + reg_val = + + AL_REG_BITS_FIELD( + AL_NAND_SDR_TIM_PARAMS_0_T_SETUP_SHIFT, + dev_properties->timing.tSETUP) | + + AL_REG_BITS_FIELD( + AL_NAND_SDR_TIM_PARAMS_0_T_HOLD_SHIFT, + dev_properties->timing.tHOLD) | + + AL_REG_BITS_FIELD( + AL_NAND_SDR_TIM_PARAMS_0_T_WH_SHIFT, + dev_properties->timing.tWH) | + + AL_REG_BITS_FIELD( + AL_NAND_SDR_TIM_PARAMS_0_T_WRP_SHIFT, + dev_properties->timing.tWRP) | + + AL_REG_BITS_FIELD( + AL_NAND_SDR_TIM_PARAMS_0_T_INTCMD_SHIFT, + dev_properties->timing.tINTCMD); + + al_reg_write32(&obj->regs_base->sdr_timing_params_0, reg_val); + + reg_val = + + AL_REG_BITS_FIELD( + AL_NAND_SDR_TIM_PARAMS_1_T_RR_SHIFT, + dev_properties->timing.tRR) | + + AL_REG_BITS_FIELD( + AL_NAND_SDR_TIM_PARAMS_1_T_WB_SHIFT, + dev_properties->timing.tWB & + (AL_NAND_SDR_TIM_PARAMS_1_T_WB_MASK >> + AL_NAND_SDR_TIM_PARAMS_1_T_WB_SHIFT)) | + + AL_REG_BITS_FIELD( + AL_NAND_SDR_TIM_PARAMS_1_T_READ_DLY_SHIFT, + dev_properties->timing.readDelay) | + + AL_REG_BITS_FIELD( + AL_NAND_SDR_TIM_PARAMS_1_T_WB_MSB_SHIFT, + dev_properties->timing.tWB >> + AL_NAND_SDR_TIM_PARAMS_1_T_WB_WIDTH); + + al_reg_write32(&obj->regs_base->sdr_timing_params_1, reg_val); +} + +static void _al_nand_dev_config_rdy_busy_wait_cnt( + struct al_nand_ctrl_obj *obj) +{ + uint32_t reg_val; + + struct al_nand_dev_properties *dev_properties = + &obj->dev_properties; + + al_assert(obj); + + reg_val = 0; + + AL_REG_FIELD_SET( + reg_val, + AL_NAND_RDYBSY_WAIT_CNT_REG_RDY_TOUT_CNT_MASK, + AL_NAND_RDYBSY_WAIT_CNT_REG_RDY_TOUT_CNT_SHIFT, + dev_properties->readyBusyTimeout); + + AL_REG_FIELD_SET( + reg_val, + AL_NAND_RDYBSY_WAIT_CNT_REG_RDYBSYEN_MASK, + AL_NAND_RDYBSY_WAIT_CNT_REG_RDYBSYEN_SHIFT, + (dev_properties->readyBusyTimeout > 0) ? 
0 : 1);
+
+	al_reg_write32(&obj->regs_base->rdy_busy_wait_cnt_reg, reg_val);
+}
+
+static void _al_nand_dev_config_bch_ctl_reg_0(
+	struct al_nand_ctrl_obj *obj)
+{
+	uint32_t reg_val;
+
+	struct al_nand_ecc_config *ecc_config =
+		&obj->ecc_config;
+
+	al_assert(obj);
+
+	reg_val = al_reg_read32(&obj->regs_base->bch_ctrl_reg_0);
+
+	AL_REG_BIT_VAL_SET(
+		reg_val,
+		AL_NAND_BCH_CTRL_REG_0_ECC_ALGORITHM,
+		ecc_config->algorithm);
+
+	AL_REG_FIELD_SET(
+		reg_val,
+		AL_NAND_BCH_CTRL_REG_0_BCH_T_MASK,
+		AL_NAND_BCH_CTRL_REG_0_BCH_T_SHIFT,
+		obj->ecc_config.num_corr_bits);
+
+	AL_REG_FIELD_SET(
+		reg_val,
+		AL_NAND_BCH_CTRL_REG_0_BCH_N_MASK,
+		AL_NAND_BCH_CTRL_REG_0_BCH_N_SHIFT,
+		_al_nand_get_bch_cw_parity_size(obj));
+
+	al_reg_write32(&obj->regs_base->bch_ctrl_reg_0, reg_val);
+}
+
+static void _al_nand_dev_config_bch_ctl_reg_1(
+	struct al_nand_ctrl_obj *obj)
+{
+	uint32_t reg_val;
+
+	struct al_nand_ecc_config *ecc_config =
+		&obj->ecc_config;
+
+	al_assert(obj);
+
+	reg_val = 0;
+
+	AL_REG_FIELD_SET(
+		reg_val,
+		AL_NAND_BCH_CTRL_REG_1_BCH_K_MASK,
+		AL_NAND_BCH_CTRL_REG_1_BCH_K_SHIFT,
+		ecc_config->messageSize);
+
+	al_reg_write32(&obj->regs_base->bch_ctrl_reg_1, reg_val);
+}
+
+static void _al_nand_data_buf_wait_for_cw_vacancy(
+	struct al_nand_ctrl_obj *obj)
+{
+	uint32_t reg_val;
+
+	do {
+		reg_val = 0;
+
+		AL_REG_BIT_VAL_SET(
+			reg_val,
+			AL_NAND_NFC_INT_STAT_BUF_WR_RDY,
+			1);
+
+		al_reg_write32(
+			&obj->regs_base->nfc_int_stat,
+			reg_val);
+
+		reg_val = al_reg_read32(&obj->regs_base->nfc_int_stat);
+
+	} while (!AL_REG_BIT_GET(reg_val, AL_NAND_NFC_INT_STAT_BUF_WR_RDY));
+}
+
+static void _al_nand_cmd_seq_add_command(
+	uint32_t **cmd_seq_buff,
+	int *cmd_seq_buff_num_entries,
+	enum al_nand_command_type type,
+	uint8_t argument)
+{
+	**cmd_seq_buff = AL_NAND_CMD_SEQ_ENTRY(type, argument);
+
+	(*cmd_seq_buff)++;
+	(*cmd_seq_buff_num_entries)++;
+}
+
+static void _al_nand_cmd_seq_add_command_cyc(
+	uint32_t **cmd_seq_buff,
+	int *cmd_seq_buff_num_entries,
+	enum al_nand_command_type type,
+	int argument,
+	int num_cyc)
+{
+	while (num_cyc) {
+		**cmd_seq_buff = AL_NAND_CMD_SEQ_ENTRY(type, argument & 0xFF);
+
+		(*cmd_seq_buff)++;
+		(*cmd_seq_buff_num_entries)++;
+		num_cyc--;
+		argument >>= 8;
+	}
+}
+
+static int _al_nand_get_bch_num_corr_bits(
+	struct al_nand_ctrl_obj *obj)
+{
+	return 4 * (1 + obj->ecc_config.num_corr_bits);
+}
+
+static int _al_nand_get_bch_cw_parity_size(
+	struct al_nand_ctrl_obj *obj)
+{
+	return 15 * _al_nand_get_bch_num_corr_bits(obj);
+}
+
+static int _al_nand_get_cw_spare_cnt(
+	struct al_nand_ctrl_obj *obj)
+{
+	if (obj->ecc_config.algorithm == AL_NAND_ECC_ALGORITHM_HAMMING) {
+		return 4;
+	} else {
+		return AL_ALIGN_UP(
+			((_al_nand_get_bch_cw_parity_size(obj) + 7) / 8), 4);
+	}
+}
+
+/******************************************************************************/
+/******************************************************************************/
+uint32_t al_nand_int_status_get(
+	struct al_nand_ctrl_obj *obj)
+{
+	al_assert(obj);
+
+	return al_reg_read32(&obj->regs_base->nfc_int_stat);
+}
+
+void al_nand_int_enable(
+	struct al_nand_ctrl_obj *obj,
+	uint32_t int_mask)
+{
+	uint32_t val;
+
+	al_assert(obj);
+
+	/* before enabling the interrupt, the status must be cleared because
+	 * the controller doesn't clear the status when it is no longer valid */
+	al_reg_write32(&obj->regs_base->nfc_int_stat, int_mask);
+
+	val = al_reg_read32(&obj->regs_base->nfc_int_en);
+	AL_REG_MASK_SET(val, int_mask);
+	al_reg_write32(&obj->regs_base->nfc_int_en, val);
+}
+
+void
al_nand_int_disable( + struct al_nand_ctrl_obj *obj, + uint32_t int_mask) +{ + uint32_t val; + + al_assert(obj); + + val = al_reg_read32(&obj->regs_base->nfc_int_en); + AL_REG_MASK_CLEAR(val, int_mask); + al_reg_write32(&obj->regs_base->nfc_int_en, val); +} + +void al_nand_int_clear( + struct al_nand_ctrl_obj *obj, + uint32_t int_mask) +{ + al_assert(obj); + + al_reg_write32(&obj->regs_base->nfc_int_stat, int_mask); +} diff --git a/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand.h b/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand.h new file mode 100644 index 00000000000000..9321af861741de --- /dev/null +++ b/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand.h @@ -0,0 +1,1006 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @addtogroup group_nand NAND controller + * @ingroup group_pbs + * @{ + * The NAND controller is activated mostly through executing command sequences + * and reading/writing from/to its data buffer. + * Both command sequence execution and data buffer reading/writing can either be + * obtained directly through memory-mapped read/write access, or through a DMA + * Command sequences for page reading/writing can be generated using dedicated + * API functions. + * Command sequences for other NAND operations can be manually constructed. 
+ *
+ * A typical initialization flow:
+ * - al_nand_init
+ * - al_nand_dev_select
+ * - al_nand_dev_config_basic
+ * - al_nand_cmd_seq_execute(reset sequence)
+ * - al_nand_cmd_seq_execute(read properties page / read id sequence)
+ * - al_nand_data_buff_read
+ * - al_nand_cmd_seq_execute(set features sequence)
+ * - al_nand_data_buff_write
+ * - al_nand_dev_config(device specific config)
+ *
+ * A typical non-DMA page reading flow:
+ * - al_nand_cmd_seq_size_page_read
+ * - allocate appropriate sequence buffer
+ * - al_nand_cmd_seq_gen_page_read
+ * - al_nand_cw_config
+ * - al_nand_cmd_seq_execute
+ * - al_nand_data_buff_read
+ *
+ * A typical DMA page reading flow:
+ * - al_nand_cmd_seq_size_page_read
+ * - allocate appropriate sequence physical buffer
+ * - al_nand_cmd_seq_gen_page_read
+ * - al_nand_cw_config_dma
+ * - al_nand_cmd_seq_execute_dma(no interrupt)
+ * - al_nand_data_buff_read_dma(with interrupt)
+ * - wait for interrupt
+ * - al_nand_transaction_completion (for the command sequence)
+ * - al_nand_transaction_completion (for the read transaction)
+ *
+ * @file al_hal_nand.h
+ *
+ * @brief Header file for the NAND HAL driver
+ *
+ */
+
+#ifndef __AL_HAL_NAND_H__
+#define __AL_HAL_NAND_H__
+
+#include
+#include
+#include "../../../../dma/al/al_hal_ssm_raid.h"
+#include "al_hal_nand_defs.h"
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/**
+ * NAND controller initialization
+ *
+ * Initializes all resources required for operating the NAND controller.
+ * This function should be called prior to any other attempt to access the
+ * controller.
+ * A handle to an object is initialized and shall be used in all other API
+ * calls.
+ *
+ * @param nand_base
+ *		The base address for accessing the NAND buffers and registers
+ *
+ * @param raid_dma
+ *		An allocated RAID DMA handle, or NULL if DMA is not required
+ *
+ * @param raid_dma_qid
+ *		An allocated RAID DMA queue handle (applicable if raid_dma !=
+ *		NULL)
+ *
+ * @param obj
+ *		The initialized object
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_nand_init(
+	struct al_nand_ctrl_obj *obj,
+	void __iomem *nand_base,
+	struct al_ssm_dma *raid_dma,
+	uint32_t raid_dma_qid);
+
+/**
+ * NAND controller termination
+ *
+ * Releases all resources previously initialized for operating the NAND
+ * controller.
+ * No function besides 'al_nand_init' can be called after calling this
+ * function.
+ *
+ * @param obj
+ *		The object context
+ *
+ */
+void al_nand_terminate(
+	struct al_nand_ctrl_obj *obj);
+
+/**
+ * NAND controller reset
+ *
+ * Resets various sub-units of the NAND controller according to a mask provided
+ * by the caller.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param reset_mask
+ *		A bitwise OR combination of one or more sub-units.
+ *
+ * @see AL_NAND_RESET_MASK_SOFT
+ * @see AL_NAND_RESET_MASK_CMD_FIFO
+ * @see AL_NAND_RESET_MASK_DATA_FIFO
+ * @see AL_NAND_RESET_MASK_DDRRX_FIFO
+ * @see AL_NAND_RESET_MASK_CMD_ENGINE
+ * @see AL_NAND_RESET_MASK_TIMING_ENGINE
+ *
+ */
+void al_nand_reset(
+	struct al_nand_ctrl_obj *obj,
+	int reset_mask);
+
+/**
+ * NAND device selection
+ *
+ * Selects one of the devices connected to the NAND controller as the active
+ * device. Following device operations will act upon it.
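+ *
+ * Minimal usage sketch (the device index 0 is illustrative; valid indices
+ * depend on how the board wires its chip selects):
+ *
+ * @code
+ * al_nand_dev_select(&obj, 0);
+ * @endcode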
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param device_index
+ *		The index of the device to be selected
+ *
+ * @see AL_NAND_MAX_NUM_DEVICES
+ *
+ */
+void al_nand_dev_select(
+	struct al_nand_ctrl_obj *obj,
+	int device_index);
+
+/**
+ * NAND device standard basic config
+ *
+ * Configures the currently selected NAND device with standard basic
+ * config that can be used only for resetting the device and for reading
+ * its ID and properties page.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_nand_dev_config_basic(
+	struct al_nand_ctrl_obj *obj);
+
+/**
+ * NAND device config
+ *
+ * Configures the currently selected NAND device. The config involves
+ * both setting the device properties and ECC config.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param dev_properties
+ *		NAND device properties (device specific)
+ *
+ * @param ecc_config
+ *		ECC config (application requirements)
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_nand_dev_config(
+	struct al_nand_ctrl_obj *obj,
+	struct al_nand_dev_properties *dev_properties,
+	struct al_nand_ecc_config *ecc_config);
+
+/**
+ * NAND decode properties
+ *
+ * Reads properties from the PBS registers and parses them.
+ *
+ * @param pbs_regs_base
+ *		PBS regs base address
+ *
+ * @param dev_properties
+ *		NAND device properties (device specific)
+ *
+ * @param ecc_config
+ *		ECC config (application requirements)
+ *
+ * @param dev_ext_props
+ *		NAND device extra properties
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_nand_properties_decode(
+	void __iomem *pbs_regs_base,
+	struct al_nand_dev_properties *dev_properties,
+	struct al_nand_ecc_config *ecc_config,
+	struct al_nand_extra_dev_properties *dev_ext_props);
+
+/**
+ * NAND code word configuration
+ *
+ * Configures the code word settings for the next read/write sequence
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param cw_size
+ *		Code word size [bytes]
+ *
+ * @param cw_count
+ *		Code word count
+ *
+ */
+void al_nand_cw_config(
+	struct al_nand_ctrl_obj *obj,
+	uint32_t cw_size,
+	uint32_t cw_count);
+
+/**
+ * NAND code word configuration through DMA - prepare buffer
+ *
+ * Configures the code word settings for the next read/write sequence through
+ * DMA - buffer preparation phase.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param cw_size
+ *		Code word size [bytes]
+ *
+ * @param cw_count
+ *		Code word count
+ *
+ * @param buff_arr
+ *		An allocated temporary DMA transaction buffer array.
+ *		The required number of elements in the array is 2.
+ *		The required buffer size is 4 bytes.
+ *
+ */
+void al_nand_cw_config_buffs_prepare(
+	struct al_nand_ctrl_obj *obj,
+	uint32_t cw_size,
+	uint32_t cw_count,
+	uint32_t *buff_arr[2]);
+
+/**
+ * NAND code word configuration through DMA - send buffer
+ *
+ * Configures the code word settings for the next read/write sequence through
+ * DMA - buffer sending phase.
+ * Call 'al_nand_transaction_completion' to check and acknowledge completion.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param tx_buff_arr
+ *		A DMA transaction buffer array.
+ *		The required number of elements in the array is 2.
+ *		The required buffer size is 4 bytes.
+ *		The buffers in the array can only be freed upon completion.
+ *
+ * @param trigger_interrupt
+ *		Whether or not interrupt should be triggered upon completion
+ *		of this transaction
+ *
+ * @param num_transactions
+ *		The number of DMA transactions generated by this request.
+ * Each should be acknowledged by calling + * 'al_nand_transaction_completion'. + * + * @return 0 if no error found. + * + */ +int al_nand_cw_config_dma( + struct al_nand_ctrl_obj *obj, + struct al_buf tx_buff_arr[2], + int trigger_interrupt, + int *num_transactions); + +/** + * NAND ECC enable state setting + * + * Enables/disables ECC + * + * @param obj + * The object context + * + * @param enabled + * The required ECC enable state + * + */ +void al_nand_ecc_set_enabled( + struct al_nand_ctrl_obj *obj, + int enabled); + +/** + * Write protection enabling/disabling + * + * Enables or disables NAND device write protection (by controlling the WP + * signal) + * + * @param obj + * The object context + * + * @param enable + * A flag for either enabling or disabling write protection + * + */ +void al_nand_wp_set_enable( + struct al_nand_ctrl_obj *obj, + int enable); + +/** + * TX enabling/disabling + * + * Enables or disables NAND device TX mode + * + * @param obj + * The object context + * + * @param enable + * A flag for either enabling or disabling the TX mode + * + */ +void al_nand_tx_set_enable( + struct al_nand_ctrl_obj *obj, + int enable); + +/** + * Miscellaneous control through DMA - buffer prepare phase + * + * Miscellaneous control through DMA: + * - Enables or disables NAND device write protection (by controlling the WP + * signal) + * - Enables or disables NAND device TX mode + * + * @param obj + * The object context + * + * @param wp_enable + * A flag for either enabling or disabling write protection + * + * @param tx_enable + * A flag for either enabling or disabling the TX mode + * + * @param tx_buff_arr + * An allocated temporary DMA transaction buffer array. + * The required number of elements in the array is 1. + * The required buffer size is 4 bytes. + * + */ +void al_nand_misc_ctrl_buffs_prepare( + struct al_nand_ctrl_obj *obj, + int wp_enable, + int tx_enable, + uint32_t *tx_buff_arr[1]); + +/** + * Miscellaneous control through DMA - buffer sending phase + * + * Miscellaneous control through DMA: + * - Enables or disables NAND device write protection (by controlling the WP + * signal) + * - Enables or disables NAND device TX mode + * + * @param obj + * The object context + * + * @param tx_buff_arr + * An allocated DMA transaction buffer array. + * The required number of elements in the array is 1. + * The required buffer size is 4 bytes. + * The buffers in the array can only be freed upon completion. + * + * @param trigger_interrupt + * Whether or not interrupt should be triggered upon completion + * of this request + * + * @param num_transactions + * The number of DMA transactions generated by this request. + * Each should be acknowledged by calling + * 'al_nand_transaction_completion'. + * + * @return 0 if no error found. 
+ *
+ */
+int al_nand_misc_ctrl_dma(
+	struct al_nand_ctrl_obj *obj,
+	struct al_buf tx_buff_arr[1],
+	int trigger_interrupt,
+	int *num_transactions);
+
+/**
+ * Uncorrectable Error Status Getting
+ *
+ * Gets the current status of uncorrectable errors - whether any occurred
+ * or not
+ *
+ * @param obj
+ *		The object context
+ *
+ * @return 0 if no uncorrectable errors
+ * @return 1 if uncorrectable errors
+ *
+ */
+int al_nand_uncorr_err_get(
+	struct al_nand_ctrl_obj *obj);
+
+/**
+ * Uncorrectable Error Status Clearing
+ *
+ * Clears the current status of uncorrectable errors
+ *
+ * @param obj
+ *		The object context
+ *
+ */
+void al_nand_uncorr_err_clear(
+	struct al_nand_ctrl_obj *obj);
+
+/**
+ * Correctable Error Status Getting
+ *
+ * Gets the current status of correctable errors - whether any occurred
+ * or not
+ *
+ * @param obj
+ *		The object context
+ *
+ * @return 0 if no correctable errors
+ * @return 1 if correctable errors
+ *
+ */
+int al_nand_corr_err_get(
+	struct al_nand_ctrl_obj *obj);
+
+/**
+ * Correctable Error Status Clearing
+ *
+ * Clears the current status of correctable errors
+ *
+ * @param obj
+ *		The object context
+ *
+ */
+void al_nand_corr_err_clear(
+	struct al_nand_ctrl_obj *obj);
+
+/**
+ * NAND device testing for being ready
+ *
+ * Checks the state of the NAND device ready/busy# signal
+ *
+ * @param obj
+ *		The object context
+ *
+ * @return A flag indicating whether the device is ready or busy.
+ *
+ */
+int al_nand_dev_is_ready(
+	struct al_nand_ctrl_obj *obj);
+
+/**
+ * NAND device page reading command sequence size obtaining
+ *
+ * Obtains the required size for a command sequence for reading a NAND device
+ * page.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param num_bytes
+ *		The number of bytes to read
+ *
+ * @param ecc_enabled
+ *		Whether or not to enable HW ECC
+ *
+ * @param cmd_seq_buff_num_entries
+ *		The required number of entries
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_nand_cmd_seq_size_page_read(
+	struct al_nand_ctrl_obj *obj,
+	int num_bytes,
+	int ecc_enabled,
+	int *cmd_seq_buff_num_entries);
+
+/**
+ * NAND device page reading command sequence generation
+ *
+ * Generates a command sequence for reading a NAND device page.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param column
+ *		The byte address within the page
+ *
+ * @param row
+ *		The page address
+ *
+ * @param num_bytes
+ *		The number of bytes to read
+ *
+ * @param ecc_enabled
+ *		Whether or not to enable HW ECC
+ *
+ * @param cmd_seq_buff
+ *		An allocated command sequence buffer
+ *
+ * @param cmd_seq_buff_num_entries
+ *		in: the number of entries in the command sequence buffer
+ *		out: the number of used entries in the command sequence buffer
+ *
+ * @param cw_size
+ *		The code word size to be configured prior to executing the
+ *		sequence
+ *
+ * @param cw_count
+ *		The code word count to be configured prior to executing the
+ *		sequence
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_nand_cmd_seq_gen_page_read(
+	struct al_nand_ctrl_obj *obj,
+	int column,
+	int row,
+	int num_bytes,
+	int ecc_enabled,
+	uint32_t *cmd_seq_buff,
+	int *cmd_seq_buff_num_entries,
+	uint32_t *cw_size,
+	uint32_t *cw_count);
+
+/**
+ * NAND device page writing command sequence size obtaining
+ *
+ * Obtains the required size for a command sequence for writing a NAND device
+ * page.
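+ *
+ * The returned entry count is intended for sizing the sequence buffer
+ * before generation, e.g. (sketch, error handling omitted):
+ *
+ * @code
+ * int n = 0;
+ *
+ * al_nand_cmd_seq_size_page_write(&obj, num_bytes, 1, &n);
+ * @endcode
+ *
+ * after which a buffer of 'n' uint32_t entries is allocated and passed,
+ * together with 'n' as the in/out entry count, to
+ * al_nand_cmd_seq_gen_page_write.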
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param num_bytes
+ *		The number of bytes to write
+ *
+ * @param ecc_enabled
+ *		Whether or not to enable HW ECC
+ *
+ * @param cmd_seq_buff_num_entries
+ *		The required number of entries
+ *
+ */
+void al_nand_cmd_seq_size_page_write(
+	struct al_nand_ctrl_obj *obj,
+	int num_bytes,
+	int ecc_enabled,
+	int *cmd_seq_buff_num_entries);
+
+/**
+ * NAND device page writing command sequence generation
+ *
+ * Generates a command sequence for writing a NAND device page.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param column
+ *		The byte address within the page
+ *
+ * @param row
+ *		The page address
+ *
+ * @param num_bytes
+ *		The number of bytes to write
+ *
+ * @param ecc_enabled
+ *		Whether or not to enable HW ECC
+ *
+ * @param cmd_seq_buff
+ *		An allocated command sequence buffer
+ *
+ * @param cmd_seq_buff_num_entries
+ *		in: the number of entries in the command sequence buffer
+ *		out: the number of used entries in the command sequence buffer
+ *
+ * @param cw_size
+ *		The code word size to be configured prior to executing the
+ *		sequence
+ *
+ * @param cw_count
+ *		The code word count to be configured prior to executing the
+ *		sequence
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_nand_cmd_seq_gen_page_write(
+	struct al_nand_ctrl_obj *obj,
+	int column,
+	int row,
+	int num_bytes,
+	int ecc_enabled,
+	uint32_t *cmd_seq_buff,
+	int *cmd_seq_buff_num_entries,
+	uint32_t *cw_size,
+	uint32_t *cw_count);
+
+/**
+ * NAND controller command constructor
+ *
+ * Constructs a NAND controller command
+ *
+ * @param type
+ *		The command type
+ *
+ * @param arg
+ *		The command argument
+ *
+ * @see al_nand_command_type
+ *
+ * @return The encoded command sequence entry
+ *
+ */
+#define AL_NAND_CMD_SEQ_ENTRY(type, arg) \
+	(((type) << 8) | (arg))
+
+/**
+ * NAND controller single command execution
+ *
+ * Executes a single NAND controller command.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param cmd
+ *		The command to be executed
+ *
+ */
+void al_nand_cmd_single_execute(
+	struct al_nand_ctrl_obj *obj,
+	uint32_t cmd);
+
+/**
+ * NAND controller command sequence execution
+ *
+ * Executes a NAND controller command sequence.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param cmd_seq_buff
+ *		The command sequence buffer
+ *
+ * @param cmd_seq_buff_num_entries
+ *		The command sequence buffer number of entries
+ *
+ */
+void al_nand_cmd_seq_execute(
+	struct al_nand_ctrl_obj *obj,
+	uint32_t *cmd_seq_buff,
+	int cmd_seq_buff_num_entries);
+
+/**
+ * NAND controller command buffer emptiness check
+ *
+ * Checks whether the NAND controller command buffer is empty.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @return An indication of whether the command buffer is empty
+ *
+ */
+int al_nand_cmd_buff_is_empty(
+	struct al_nand_ctrl_obj *obj);
+
+/**
+ * NAND controller command sequence execution through DMA
+ *
+ * Begins executing a NAND controller command sequence using a DMA.
+ * Call 'al_nand_transaction_completion' to check and acknowledge completion.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param cmd_seq_buff
+ *		The command sequence buffer
+ *		The buffer shall remain allocated until transaction completion
+ *
+ * @param trigger_interrupt
+ *		Whether or not interrupt should be triggered upon completion
+ *		of this transaction
+ *
+ * @return 0 if no error found.
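+ *
+ * Completion can then be polled and acknowledged, e.g. (sketch; a real
+ * caller would typically wait for the interrupt instead of spinning):
+ *
+ * @code
+ * uint32_t comp_status;
+ *
+ * while (!al_nand_transaction_completion(&obj, &comp_status))
+ *	;
+ * @endcode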
+ *
+ */
+int al_nand_cmd_seq_execute_dma(
+	struct al_nand_ctrl_obj *obj,
+	struct al_buf *cmd_seq_buff,
+	int trigger_interrupt);
+
+/**
+ * NAND controller command FIFO scion sending through DMA - buffer preparation
+ * phase
+ *
+ * Sends NAND controller command FIFO scion using a DMA.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param buff
+ *		An allocated DMA transaction buffer.
+ *		The required buffer size is 128 bytes.
+ *
+ */
+void al_nand_cmd_seq_scion_buff_prepare(
+	struct al_nand_ctrl_obj *obj,
+	uint32_t *buff);
+
+/**
+ * NAND controller command FIFO scion sending through DMA - buffer sending
+ * phase
+ *
+ * Sends NAND controller command FIFO scion using a DMA.
+ * Call 'al_nand_transaction_completion' to check and acknowledge completion.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param tx_buff
+ *		An allocated temporary DMA transaction buffer.
+ *		The required buffer size is 128 bytes.
+ *		The buffer can only be freed upon completion.
+ *
+ * @param trigger_interrupt
+ *		Whether or not interrupt should be triggered upon completion
+ *		of this transaction
+ *
+ * @param num_transactions
+ *		The number of DMA transactions generated by this request.
+ *		Each should be acknowledged by calling
+ *		'al_nand_transaction_completion'.
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_nand_cmd_seq_scion_dma(
+	struct al_nand_ctrl_obj *obj,
+	struct al_buf *tx_buff,
+	int trigger_interrupt,
+	int *num_transactions);
+
+/**
+ * Get the data buffer address.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @return the data buffer address
+ *
+ */
+void __iomem *al_nand_data_buff_base_get(
+	struct al_nand_ctrl_obj *obj);
+
+/**
+ * NAND controller data buffer reading
+ *
+ * Reads from the NAND controller data buffer.
+ * Data becomes available in the data buffer according to prior commands being
+ * written to the controller command FIFO.
+ * This function is blocking.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param num_bytes
+ *		The number of bytes to read
+ *
+ * @param num_bytes_skip_head
+ *		The number of bytes to skip at the beginning of reading
+ *
+ * @param num_bytes_skip_tail
+ *		The number of bytes to skip at the end of reading
+ *
+ * @param buff
+ *		The read data
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_nand_data_buff_read(
+	struct al_nand_ctrl_obj *obj,
+	int num_bytes,
+	int num_bytes_skip_head,
+	int num_bytes_skip_tail,
+	uint8_t *buff);
+
+/**
+ * NAND controller data buffer reading through DMA
+ *
+ * Begins reading from the NAND controller data buffer using a DMA.
+ * Call 'al_nand_transaction_completion' to check and acknowledge completion.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param buff
+ *		The read data
+ *		The buffer shall remain allocated until transaction completion
+ *
+ * @param trigger_interrupt
+ *		Whether or not interrupt should be triggered upon completion
+ *		of this transaction
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_nand_data_buff_read_dma(
+	struct al_nand_ctrl_obj *obj,
+	struct al_buf *buff,
+	int trigger_interrupt);
+
+/**
+ * NAND controller data buffer writing
+ *
+ * Writes to the NAND controller data buffer.
+ *
+ * @param obj
+ *		The object context
+ *
+ * @param num_bytes
+ *		The number of bytes to write
+ *
+ * @param buff
+ *		The data to write
+ *
+ * @return 0 if no error found.
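+ *
+ * For example, the 4-byte payload of a previously executed set-features
+ * sequence could be pushed as follows (sketch; the payload value is
+ * illustrative):
+ *
+ * @code
+ * static const uint8_t params[4] = { 0x01, 0x00, 0x00, 0x00 };
+ * int err = al_nand_data_buff_write(&obj, sizeof(params), params);
+ * @endcode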
+ * + */ +int al_nand_data_buff_write( + struct al_nand_ctrl_obj *obj, + int num_bytes, + const uint8_t *buff); + +/** + * NAND controller data buffer writing through DMA + * + * Begins writing to the NAND controller data buffer using a DMA. + * Call 'al_nand_transaction_completion' to check and acknowledge completion. + * + * @param obj + * The object context + * + * @param buff + * The data to write + * The buffer shall remain allocated until transaction completion + * + * @param trigger_interrupt + * Whether or not interrupt should be triggered upon completion + * of this transaction + * + * @return 0 if no error found. + * + */ +int al_nand_data_buff_write_dma( + struct al_nand_ctrl_obj *obj, + struct al_buf *buff, + int trigger_interrupt); + +/** + * Check and cleanup completed transaction + * + * @param obj + * The object context + * + * @param comp_status + * The status reported by rx completion descriptor + * + * @return 1 if a transaction was completed. 0 otherwise + * + */ +int al_nand_transaction_completion( + struct al_nand_ctrl_obj *obj, + uint32_t *comp_status); + +/** + * Get the interrupt status register + * + * @param obj + * The object context + * + * @return the interrupt status register value + * + */ +uint32_t al_nand_int_status_get( + struct al_nand_ctrl_obj *obj); + +/** + * Enable interrupts for the mask status + * + * @param obj + * The object context + * + * @param int_mask + * the interrupt's status mask to enable + * + */ +void al_nand_int_enable( + struct al_nand_ctrl_obj *obj, + uint32_t int_mask); + +/** + * Disable interrupts for the mask status + * + * @param obj + * The object context + * + * @param int_mask + * the interrupt's status mask to disable + * + */ +void al_nand_int_disable( + struct al_nand_ctrl_obj *obj, + uint32_t int_mask); + +/** + * Clear interrupts for the mask status + * + * @param obj + * The object context + * + * @param int_mask + * the interrupt's status mask to clear + * + */ +void al_nand_int_clear( + struct al_nand_ctrl_obj *obj, + uint32_t int_mask); + +/* *INDENT-OFF* */ +#ifdef __cplusplus +} +#endif +/* *INDENT-ON* */ +/** @} end of NAND group */ +#endif /* __AL_HAL_NAND_H__ */ diff --git a/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand_coded_properties.h b/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand_coded_properties.h new file mode 100644 index 00000000000000..8b659151673051 --- /dev/null +++ b/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand_coded_properties.h @@ -0,0 +1,312 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @addtogroup group_nand NAND controller + * @ingroup group_pbs + * @{ + * @file al_hal_nand_coded_properties.h + * + * @brief Header file for the NAND coded properties + * + */ + +#ifndef __NAND_CODED_PROPERTIES_H__ +#define __NAND_CODED_PROPERTIES_H__ + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +#define NAND_CODED_PROPERTIES_NUM_WORDS 4 + +/******************************************************************************* + * Word 0 +*******************************************************************************/ + +/* + * Is valid: + * 0 - not valid + * 1 - valid + */ +#define NAND_CODED_PROPERTIES_WORD_0_IS_VALID_SHIFT 31 +#define NAND_CODED_PROPERTIES_WORD_0_IS_VALID_MASK 0x80000000 + +/* + * Page size: + * 000 - 512 bytes + * 001 - reserved + * 010 - 2048 bytes + * 011 - 4096 bytes + * 100 - 8192 bytes + * 101 - 16384 bytes + * 110 - reserved + * 111 - reserved + */ +#define NAND_CODED_PROPERTIES_WORD_0_PAGE_SIZE_SHIFT 28 +#define NAND_CODED_PROPERTIES_WORD_0_PAGE_SIZE_MASK 0x70000000 + +/* + * Block size: + * 000 - 16 pages + * 001 - 32 pages + * 010 - 64 pages + * 011 - 128 pages + * 100 - 256 pages + * 101 - 512 pages + * 110 - 1024 pages + * 111 - 2048 pages + */ +#define NAND_CODED_PROPERTIES_WORD_0_BLOCK_SIZE_SHIFT 25 +#define NAND_CODED_PROPERTIES_WORD_0_BLOCK_SIZE_MASK 0x0E000000 + +/* + * Bus width: + * 0 - 8 bits + * 1 - 16 bits + */ +#define NAND_CODED_PROPERTIES_WORD_0_BUS_WIDTH_SHIFT 24 +#define NAND_CODED_PROPERTIES_WORD_0_BUS_WIDTH_MASK 0x01000000 + +/* + * Num column cycles: + * 00 - 1 cycle + * 01 - 2 cycles + * 10 - 3 cycles + * 11 - 4 cycles + */ +#define NAND_CODED_PROPERTIES_WORD_0_COL_CYCLES_SHIFT 22 +#define NAND_CODED_PROPERTIES_WORD_0_COL_CYCLES_MASK 0x00C00000 + +/* + * Num row cycles: + * 000 - 1 cycle + * 001 - 2 cycles + * 010 - 3 cycles + * 011 - 4 cycles + * 100 - 5 cycles + * 101 - 6 cycles + * 110 - 7 cycles + * 111 - 8 cycles + */ +#define NAND_CODED_PROPERTIES_WORD_0_ROW_CYCLES_SHIFT 19 +#define NAND_CODED_PROPERTIES_WORD_0_ROW_CYCLES_MASK 0x00380000 + +/* + * Bad block marking method: + * 000 - Disabled + * 001 - Check only first page of each block + * 010 - Check first and second page of each block + * 011 - Check only last page of each block + * 100 - Check last and last-2 page of each block + * 101 - reserved + * 110 - reserved + * 111 - reserved + */ +#define NAND_CODED_PROPERTIES_WORD_0_BBM_METHOD_SHIFT 16 +#define NAND_CODED_PROPERTIES_WORD_0_BBM_METHOD_MASK 0x00070000 + +/* + * Bad block marking location 1: + * 0000 - 1111 - word (depending on bus width) location within spare area + */ +#define NAND_CODED_PROPERTIES_WORD_0_BBM_LOC1_SHIFT 12 +#define 
NAND_CODED_PROPERTIES_WORD_0_BBM_LOC1_MASK 0x0000F000 + +/* + * Bad block marking location 2: + * 0000 - 1111 - word (depending on bus width) location within spare area + */ +#define NAND_CODED_PROPERTIES_WORD_0_BBM_LOC2_SHIFT 8 +#define NAND_CODED_PROPERTIES_WORD_0_BBM_LOC2_MASK 0x00000F00 + +/* Reserved */ +#define NAND_CODED_PROPERTIES_WORD_0_RESERVED_SHIFT 5 +#define NAND_CODED_PROPERTIES_WORD_0_RESERVED_MASK 0x000000E0 + +/* + * Timing parameter set: + * 000 - ONFI 0 + * 001 - ONFI 1 + * 010 - ONFI 2 + * 011 - ONFI 3 + * 100 - ONFI 4 + * 101 - ONFI 5 + * 110 - manual + */ +#define NAND_CODED_PROPERTIES_WORD_0_TIMING_SET_SHIFT 2 +#define NAND_CODED_PROPERTIES_WORD_0_TIMING_SET_MASK 0x0000001C + +/* + * ECC algorithm: + * 00 - ECC disabled + * 01 - Hamming (1 bit per 512 bytes) + * 10 - BCH + */ +#define NAND_CODED_PROPERTIES_WORD_0_ECC_ALG_SHIFT 0 +#define NAND_CODED_PROPERTIES_WORD_0_ECC_ALG_MASK 0x00000003 + +/******************************************************************************* + * Word 1 +*******************************************************************************/ + +/* + * BCH required strength: + * 0000 - 4 bits + * 0001 - 8 bits + * 0010 - 12 bits + * 0011 - 16 bits + * 0100 - 20 bits + * 0101 - 24 bits + * 0110 - 28 bits + * 0111 - 32 bits + * 1000 - 36 bits + * 1001 - 40 bits + */ +#define NAND_CODED_PROPERTIES_WORD_1_ECC_BCH_STRENGTH_SHIFT 28 +#define NAND_CODED_PROPERTIES_WORD_1_ECC_BCH_STRENGTH_MASK 0xF0000000 + +/* + * BCH code word size: + * 0 - 512 bytes + * 1 - 1024 bytes + */ +#define NAND_CODED_PROPERTIES_WORD_1_ECC_BCH_CODEWORD_SHIFT 27 +#define NAND_CODED_PROPERTIES_WORD_1_ECC_BCH_CODEWORD_MASK 0X08000000 + +/* + * ECC location in spare area: + * 9 bits - location within spare area (0 - 511) + */ +#define NAND_CODED_PROPERTIES_WORD_1_ECC_LOC_SHIFT 18 +#define NAND_CODED_PROPERTIES_WORD_1_ECC_LOC_MASK 0X07FC0000 + +/* Timing - tSETUP - 6 bits */ +#define NAND_CODED_PROPERTIES_WORD_1_TIMING_TSETUP_SHIFT 12 +#define NAND_CODED_PROPERTIES_WORD_1_TIMING_TSETUP_MASK 0x0003F000 + +/* Timing - tHOLD - 6 bits */ +#define NAND_CODED_PROPERTIES_WORD_1_TIMING_THOLD_SHIFT 6 +#define NAND_CODED_PROPERTIES_WORD_1_TIMING_THOLD_MASK 0x00000FC0 + +/* Timing - tWH - 6 bits */ +#define NAND_CODED_PROPERTIES_WORD_1_TIMING_TWH_SHIFT 0 +#define NAND_CODED_PROPERTIES_WORD_1_TIMING_TWH_MASK 0x0000003F + +/******************************************************************************* + * Word 2 +*******************************************************************************/ + +/* Timing - tWRP - 6 bits */ +#define NAND_CODED_PROPERTIES_WORD_2_TIMING_TWRP_SHIFT 26 +#define NAND_CODED_PROPERTIES_WORD_2_TIMING_TWRP_MASK 0xFC000000 + +/* Timing - tINTCMD - 6 bits */ +#define NAND_CODED_PROPERTIES_WORD_2_TIMING_TINTCMD_SHIFT 20 +#define NAND_CODED_PROPERTIES_WORD_2_TIMING_TINTCMD_MASK 0x03F00000 + +/* Timing - tReadyRE - 6 bits */ +#define NAND_CODED_PROPERTIES_WORD_2_TIMING_TREADYRE_SHIFT 14 +#define NAND_CODED_PROPERTIES_WORD_2_TIMING_TREADYRE_MASK 0x000FC000 + +/* Timing - tWB - 7 bits */ +#define NAND_CODED_PROPERTIES_WORD_2_TIMING_TWB_SHIFT 7 +#define NAND_CODED_PROPERTIES_WORD_2_TIMING_TWB_MASK 0x00003F80 + +/* Timing - tReadDly - 2 bits - SDR only */ +#define NAND_CODED_PROPERTIES_WORD_2_TIMING_TREADDLY_SHIFT 5 +#define NAND_CODED_PROPERTIES_WORD_2_TIMING_TREADDLY_MASK 0x00000060 + +/******************************************************************************* + * Word 3 +*******************************************************************************/ + +/* Reserved */ 
+#define NAND_CODED_PROPERTIES_WORD_3_RESERVED_SHIFT 0 +#define NAND_CODED_PROPERTIES_WORD_3_RESERVED_MASK 0xFFFFFFFF + +/* + +Examples of bad block marking: + +Micron +------ +Before NAND Flash devices are shipped from Micron, they are erased. +The factory identifies invalid blocks before shipping by programming data other +than FFh (x8) or FFFFh (x16) into the first spare location +(column address 2,048 for x8 devices, or 1,024 for x16 devices) of the first or +second page of each bad block. + +Toshiba +------- +Read Check: Read column 517 of the 1st page in the block. +If the column is not FFh, define the block as a bad block. + +Read Check : Read either column 0 or 2048 of the 1st page or the 2nd page +of each block. If the data of the column is not FF (Hex), +define the block as a bad block. + +Numonyx +------- +The devices are supplied with all the locations inside valid blocks +erased (FFh). The bad block information is written prior to shipping. +Any block where the 6th byte (x8 devices)/1st word (x16 devices), +in the spare area of the 1st page, does not contain FFh is a bad block. + +Samsung +------- +All device locations are erased(FFh) except locations where the initial +invalid block(s) information is written prior to shipping. +The initial invalid block(s) status is defined by the 1st byte(1st word) in the +spare area. Samsung makes sure that Both of 1st and 2nd page of every initial +invalid block has non-FFh data at the column address of 4096(x16:2048). +Since the initial invalid block information is also erasable in most cases, +it is impossible to recover the information once it has been erased. +Therefore, the system must be able to recognize the initial invalid block(s) +based on the original initial invalid block information and create the initial +invalid block table via the following suggested flow chart(Figure 5). +Any intentional erasure of the original initial invalid block information +is prohibited. + +*/ + +#ifdef __cplusplus +} +#endif +/* *INDENT-ON* */ +/** @} end of NAND group */ +#endif diff --git a/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand_defs.h b/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand_defs.h new file mode 100644 index 00000000000000..18771abac6b638 --- /dev/null +++ b/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand_defs.h @@ -0,0 +1,419 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +/** + * @addtogroup group_nand NAND controller + * @ingroup group_pbs + * @{ + * @file al_hal_nand_defs.h + * + * @brief Header file for the NAND HAL driver definitions + * + */ + +#ifndef __AL_HAL_NAND_DEFS_H__ +#define __AL_HAL_NAND_DEFS_H__ + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +/** Soft Reset + */ +#define AL_NAND_RESET_MASK_SOFT (1 << 0) + +/** Command FIFO Reset + */ +#define AL_NAND_RESET_MASK_CMD_FIFO (1 << 1) + +/** Data FIFO Reset + */ +#define AL_NAND_RESET_MASK_DATA_FIFO (1 << 2) + +/** DDR receive FIFO reset + */ +#define AL_NAND_RESET_MASK_DDRRX_FIFO (1 << 3) + +/** CMD Engine module reset + * It will reset command engine after finishing currently executing + * instruction. + */ +#define AL_NAND_RESET_MASK_CMD_ENGINE (1 << 4) + +/** Timing Engine reset module + * It waits for current operation to finish on Nand Bus before applying reset + * to avoid timing glitches on Nand Bus. Later this also clears command buffer. + */ +#define AL_NAND_RESET_MASK_TIMING_ENGINE (1 << 5) + +/** Title + * Description + */ +#define AL_NAND_MAX_NUM_DEVICES 8 + +/*** interrupts status ***/ +/* Command buffer empty status */ +#define AL_NAND_INTR_STATUS_CMD_BUF_EMPTY (1 << 0) +/* Command buffer full status */ +#define AL_NAND_INTR_STATUS_CMD_BUF_FULL (1 << 1) +/* Data buffer empty status */ +#define AL_NAND_INTR_STATUS_DATA_BUF_EMPTY (1 << 2) +/* Data buffer full status */ +#define AL_NAND_INTR_STATUS_DATA_BUF_FULL (1 << 3) +/* Correctable error status */ +#define AL_NAND_INTR_STATUS_CORR_ERROR (1 << 4) +/* Uncorrectable error status */ +#define AL_NAND_INTR_STATUS_UNCORR_ERROR (1 << 5) +/* Indicate if PKT_SIZE amount of space + * is available in Buffer */ +#define AL_NAND_INTR_STATUS_BUF_WRRDY (1 << 6) +/* Indicate if PKT_SIZE amount of data + * is available in Buffer. */ +#define AL_NAND_INTR_STATUS_BUF_RDRDY (1 << 7) +/* set when the specified operation mentioned + * in the COMMAND_REG is completed */ +#define AL_NAND_INTR_STATUS_WRRD_DONE (1 << 8) +/* set when the DMA Based write/read is completed. + * Asserted high for both Write and Read operation + * on every page buffer boundary for DMA. 
 */
+#define AL_NAND_INTR_STATUS_DMA_DONE (1 << 9)
+/* Set high for both Write and Read operation
+ * on end of every DMA Transaction */
+#define AL_NAND_INTR_STATUS_TRANS_COMP (1 << 10)
+/* Command buffer overflow status */
+#define AL_NAND_INTR_STATUS_CMD_BUF_OVERFLOW (1 << 11)
+/* Command buffer underflow status */
+#define AL_NAND_INTR_STATUS_CMD_BUF_UNDERFLOW (1 << 12)
+/* Data buffer overflow status */
+#define AL_NAND_INTR_STATUS_DATA_BUF_OVERFLOW (1 << 13)
+/* Data buffer underflow status */
+#define AL_NAND_INTR_STATUS_DATA_BUF_UNDERFLOW (1 << 14)
+/* DMA transaction done interrupt status */
+#define AL_NAND_INTR_STATUS_DMA_TRANS_DONE (1 << 15)
+/* DMA Buffer boundary Cross interrupt status */
+#define AL_NAND_INTR_STATUS_DMA_BOUNDARY_CROSS (1 << 16)
+/* Slave error interrupt during DMA operations */
+#define AL_NAND_INTR_STATUS_SLAVE_ERROR (1 << 17)
+
+/** NAND device timing mode
+ * Either one of the standard ONFI timing modes, or fully manual timing.
+ */
+enum al_nand_device_timing_mode {
+	AL_NAND_DEVICE_TIMING_MODE_ONFI_0 = 0,
+	AL_NAND_DEVICE_TIMING_MODE_ONFI_1 = 1,
+	AL_NAND_DEVICE_TIMING_MODE_ONFI_2 = 2,
+	AL_NAND_DEVICE_TIMING_MODE_ONFI_3 = 3,
+	AL_NAND_DEVICE_TIMING_MODE_ONFI_4 = 4,
+	AL_NAND_DEVICE_TIMING_MODE_ONFI_5 = 5,
+	AL_NAND_DEVICE_TIMING_MODE_MANUAL = 6,
+};
+
+/** NAND device SDR timing - read delay
+ * Delay that is used for sampling flash_data after re_n goes high (number of
+ * cycles after re_n).
+ */
+enum al_nand_device_timing_sdr_read_delay {
+	AL_NAND_DEVIE_TIMING_READ_DELAY_1 = 2,
+	AL_NAND_DEVIE_TIMING_READ_DELAY_2 = 3,
+};
+
+/** NAND device manual timing parameters
+ * All values are given in controller clock cycles.
+ */
+struct al_nand_device_timing {
+
+	/** Setup time [controller cycles]
+	 * The time between assertion of the ALE, CLE, I/O signals and WE_L
+	 * assertion.
+	 */
+	uint8_t tSETUP;
+
+	/** Hold time [controller cycles]
+	 * The time ALE, CLE or I/O is held asserted after the deassertion of
+	 * the WE_L signal.
+	 */
+	uint8_t tHOLD;
+
+	/** WE_n high pulse width [controller cycles]
+	 */
+	uint8_t tWH;
+
+	/** WE_L and RE_L assertions duration [controller cycles]
+	 */
+	uint8_t tWRP;
+
+	/** Inter command gap [controller cycles]
+	 */
+	uint8_t tINTCMD;
+
+	/** Ready to data output cycle [controller cycles]
+	 */
+	uint8_t tRR;
+
+	/** The waiting time for specific command operations [controller cycles]
+	 * This timing should be provided whenever cmd1 present is set.
+	 * For example, Read Command requires the waiting time (tWB) to specify
+	 * the wait duration between the last Command Latch Cycle and R/~B.
+ */ + uint8_t tWB; + + /** Read delay (SDR only) + */ + uint8_t readDelay; + + /** Clock divider value [1 - 64] (DDR/DDR2 only) + */ + int tCLKDiv; + + /** CE_n setup time [controller cycles] (DDR/DDR2 only) + */ + int tCE_n; + + /** tDQS in delay tap value (DDR/DDR2 only) + */ + int tDQS_in; + + /** tDQS out delay (DDR/DDR2 only) + */ + int tDQS_out; +}; + +/** Title + * Description + */ +enum al_nand_device_sdr_data_width { + AL_NAND_DEVICE_SDR_DATA_WIDTH_8 = 0, + AL_NAND_DEVICE_SDR_DATA_WIDTH_16 = 1, /* Not supported */ +}; + +/** Title + * Description + */ +enum al_nand_device_page_size { + AL_NAND_DEVICE_PAGE_SIZE_2K = 0, + AL_NAND_DEVICE_PAGE_SIZE_4K = 1, + AL_NAND_DEVICE_PAGE_SIZE_8K = 2, + AL_NAND_DEVICE_PAGE_SIZE_16K = 3, + + /* TODO: Check if really supported */ + AL_NAND_DEVICE_PAGE_SIZE_512 = 4, +}; + +/** Title + * Description + */ +struct al_nand_dev_properties { + enum al_nand_device_timing_mode timingMode; + + enum al_nand_device_sdr_data_width sdrDataWidth; + + struct al_nand_device_timing timing; + + /** Timeout value for ready busy signal + * 0 - r_b_n pin value is used for checking ready busy + */ + int readyBusyTimeout; + + int num_col_cyc; + + int num_row_cyc; + + enum al_nand_device_page_size pageSize; +}; + +/** Title + * Description + */ +enum al_nand_ecc_algorithm { + AL_NAND_ECC_ALGORITHM_HAMMING = 0, + AL_NAND_ECC_ALGORITHM_BCH = 1, +}; + +/** Title + * Description + */ +enum al_nand_ecc_bch_num_corr_bits { + AL_NAND_ECC_BCH_NUM_CORR_BITS_4 = 0, + AL_NAND_ECC_BCH_NUM_CORR_BITS_8 = 1, + AL_NAND_ECC_BCH_NUM_CORR_BITS_12 = 2, + AL_NAND_ECC_BCH_NUM_CORR_BITS_16 = 3, + AL_NAND_ECC_BCH_NUM_CORR_BITS_20 = 4, + AL_NAND_ECC_BCH_NUM_CORR_BITS_24 = 5, + AL_NAND_ECC_BCH_NUM_CORR_BITS_28 = 6, + AL_NAND_ECC_BCH_NUM_CORR_BITS_32 = 7, + AL_NAND_ECC_BCH_NUM_CORR_BITS_36 = 8, + AL_NAND_ECC_BCH_NUM_CORR_BITS_40 = 9, +}; + +/** Title + * Description + */ +enum al_nand_ecc_bch_message_size { + AL_NAND_ECC_BCH_MESSAGE_SIZE_512 = 0, + AL_NAND_ECC_BCH_MESSAGE_SIZE_1024 = 1, +}; + +/** Title + * Description + */ +struct al_nand_ecc_config { + enum al_nand_ecc_algorithm algorithm; + + enum al_nand_ecc_bch_num_corr_bits num_corr_bits; + + enum al_nand_ecc_bch_message_size messageSize; + + int spareAreaOffset; +}; + +enum al_nand_bad_block_marking_method { + NAND_BAD_BLOCK_MARKING_METHOD_DISABLED = 0, + NAND_BAD_BLOCK_MARKING_CHECK_1ST_PAGE, + NAND_BAD_BLOCK_MARKING_CHECK_1ST_PAGES, + NAND_BAD_BLOCK_MARKING_CHECK_LAST_PAGE, + NAND_BAD_BLOCK_MARKING_CHECK_LAST_PAGES, +}; + +struct al_nand_bad_block_marking { + enum al_nand_bad_block_marking_method method; + int location1; + int location2; +}; + +struct al_nand_extra_dev_properties { + unsigned int pageSize; + unsigned int blockSize; + unsigned int wordSize; + + struct al_nand_bad_block_marking badBlockMarking; + + int eccIsEnabled; +}; + +/** Nand Controller Object */ +struct al_nand_ctrl_obj { + struct al_nand_regs *regs_base; + + struct al_nand_wrap_regs *wrap_regs_base; + void *cmd_buff_base; + void *data_buff_base; + + struct al_ssm_dma *raid_dma; + uint32_t raid_dma_qid; + + struct al_nand_dev_properties dev_properties; + struct al_nand_ecc_config ecc_config; + int current_dev_index; + + uint32_t cw_size; + uint32_t cw_count; + + uint32_t cw_size_remaining; + uint32_t cw_count_remaining; +}; + +enum al_nand_command_type { + /** no operation + */ + AL_NAND_COMMAND_TYPE_NOP = 0, + + /** Command + * Activate CLE and write a command byte to the NAND device + * Argument: 1 byte command + */ + AL_NAND_COMMAND_TYPE_CMD = 2, + + /** Address + 
* Activate ALE and write an address byte to the NAND device
+	 * Argument: 1 byte address (repeat for several address cycles)
+	 */
+	AL_NAND_COMMAND_TYPE_ADDRESS = 3,
+
+	/** Wait cycle count
+	 */
+	AL_NAND_COMMAND_TYPE_WAIT_CYCLE_COUNT = 4,
+
+	/** Wait for ready
+	 * Wait for the NAND device to become ready
+	 */
+	AL_NAND_COMMAND_TYPE_WAIT_FOR_READY = 5,
+
+	/** Count for read data
+	 * Requests reading of bytes from the NAND device.
+	 * This field is 2 bytes (LSB first).
+	 * Argument: LSB upon first occurrence, MSB upon second occurrence
+	 */
+	AL_NAND_COMMAND_TYPE_DATA_READ_COUNT = 6,
+
+	/** Count for write data
+	 * Requests writing of bytes to the NAND device.
+	 * This field is 2 bytes (LSB first).
+	 * Argument: LSB upon first occurrence, MSB upon second occurrence
+	 */
+	AL_NAND_COMMAND_TYPE_DATA_WRITE_COUNT = 7,
+
+	/** Status read command
+	 * Requests reading of NAND device status.
+	 * Argument: Number of status bytes to read (1-4)
+	 */
+	AL_NAND_COMMAND_TYPE_STATUS_READ = 8,
+
+	/** Spare read
+	 */
+	AL_NAND_COMMAND_TYPE_SPARE_READ_COUNT = 9,
+
+	/** Spare write
+	 */
+	AL_NAND_COMMAND_TYPE_SPARE_WRITE_COUNT = 10,
+
+	/** Status write command
+	 * (Write command with direct data in cmd)
+	 */
+	AL_NAND_COMMAND_TYPE_STATUS_WRITE = 11,
+};
+
+struct al_nand_command {
+	enum al_nand_command_type type;
+	uint8_t argument;
+};
+
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of NAND group */
+#endif	/* __AL_HAL_NAND_DEFS_H__ */
diff --git a/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand_dma.c b/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand_dma.c
new file mode 100644
index 00000000000000..9819fc5a796dd8
--- /dev/null
+++ b/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand_dma.c
@@ -0,0 +1,504 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+	* Redistributions of source code must retain the above copyright notice,
+	  this list of conditions and the following disclaimer.
+
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in
+	  the documentation and/or other materials provided with the
+	  distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "al_hal_nand.h" +#include "al_hal_nand_regs.h" + +static int _al_nand_reg_write_dma( + struct al_nand_ctrl_obj *obj, + uintptr_t reg_addr, + struct al_buf *tx_buff, + int trigger_interrupt); + +/******************************************************************************/ +/******************************************************************************/ +void al_nand_cw_config_buffs_prepare( + struct al_nand_ctrl_obj *obj, + uint32_t cw_size, + uint32_t cw_count, + uint32_t *buff_arr[2]) +{ + al_assert(obj); + al_assert(buff_arr); + + *(buff_arr[0]) = + AL_REG_BITS_FIELD( + AL_NAND_CODEWORD_SIZE_CNT_REG_SIZE_SHIFT, + cw_size) | + AL_REG_BITS_FIELD( + AL_NAND_CODEWORD_SIZE_CNT_REG_COUNT_SHIFT, + cw_count); + + *(buff_arr[1]) = + cw_size; +} + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_cw_config_dma( + struct al_nand_ctrl_obj *obj, + struct al_buf tx_buff_arr[2], + int trigger_interrupt, + int *num_transactions) +{ + int status = 0; + + al_assert(obj); + al_assert(tx_buff_arr); + al_assert(num_transactions); + + if (0 != _al_nand_reg_write_dma( + obj, + (uintptr_t)&obj->regs_base->codeword_size_cnt_reg, + &tx_buff_arr[0], + 0)) { + + al_err("_al_nand_reg_write_dma failed!\n"); + status = -EIO; + goto done; + } + + if (0 != _al_nand_reg_write_dma( + obj, + (uintptr_t)&obj->wrap_regs_base->code_word_size, + &tx_buff_arr[1], + trigger_interrupt)) { + + al_err("_al_nand_reg_write_dma failed!\n"); + status = -EIO; + goto done; + } + +done: + + *num_transactions = 2; + + return status; +} + +/******************************************************************************/ +/******************************************************************************/ +void al_nand_misc_ctrl_buffs_prepare( + struct al_nand_ctrl_obj *obj, + int wp_enable, + int tx_enable, + uint32_t *buff_arr[1]) +{ + uint32_t reg_val; + + al_assert(obj); + al_assert(buff_arr); + + reg_val = al_reg_read32(&obj->regs_base->ctl_reg0); + + AL_REG_BIT_VAL_SET(reg_val, AL_NAND_CTL_REG0_WP, wp_enable ? 
0 : 1); + + AL_REG_BIT_VAL_SET(reg_val, AL_NAND_CTL_REG0_TX_MODE, tx_enable); + + *(buff_arr[0]) = reg_val; +} + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_misc_ctrl_dma( + struct al_nand_ctrl_obj *obj, + struct al_buf tx_buff_arr[1], + int trigger_interrupt, + int *num_transactions) +{ + int status = 0; + + al_assert(obj); + al_assert(tx_buff_arr); + al_assert(num_transactions); + + if (0 != _al_nand_reg_write_dma( + obj, + (uintptr_t)&obj->regs_base->ctl_reg0, + &tx_buff_arr[0], + trigger_interrupt)) { + + al_err("_al_nand_reg_write_dma failed!\n"); + status = -EIO; + goto done; + } + +done: + + *num_transactions = 1; + + return status; +} + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_cmd_seq_execute_dma( + struct al_nand_ctrl_obj *obj, + struct al_buf *cmd_seq_buff, + int trigger_interrupt) +{ + int status = 0; + + struct al_buf transaction_dst_buf; + struct al_block transaction_src_block; + struct al_block transaction_dst_block; + struct al_raid_transaction transaction; + + al_assert(obj); + al_assert(cmd_seq_buff); + + transaction_src_block.bufs = cmd_seq_buff; + transaction_src_block.num = 1; + + transaction_dst_buf.addr = + (al_phys_addr_t)(uintptr_t)obj->cmd_buff_base; + transaction_dst_buf.len = cmd_seq_buff->len; + + transaction_dst_block.bufs = &transaction_dst_buf; + transaction_dst_block.num = 1; + + transaction.op = AL_RAID_OP_MEM_CPY; + + transaction.flags = + AL_RAID_BARRIER | (trigger_interrupt ? AL_RAID_INTERRUPT : 0); + + transaction.srcs_blocks = &transaction_src_block; + transaction.num_of_srcs = 1; + transaction.total_src_bufs = 1; + transaction.dsts_blocks = &transaction_dst_block; + transaction.num_of_dsts = 1; + transaction.total_dst_bufs = 1; + + if (0 != al_raid_dma_prepare( + obj->raid_dma, + obj->raid_dma_qid, + &transaction)) { + al_err("al_raid_dma_prepare failed!\n"); + status = -EIO; + goto done; + } + + if (0 != al_raid_dma_action( + obj->raid_dma, + obj->raid_dma_qid, + transaction.tx_descs_count)) { + al_err("al_raid_dma_action failed!\n"); + status = -EIO; + goto done; + } + +done: + + return status; +} + +/******************************************************************************/ +/******************************************************************************/ +void al_nand_cmd_seq_scion_buff_prepare( + struct al_nand_ctrl_obj *obj __attribute__((__unused__)), + uint32_t *buff) +{ + int i; + + for (i = 0; i < AL_NAND_CMD_FIFO_DEPTH; i++) + buff[i] = AL_NAND_CMD_SEQ_ENTRY(AL_NAND_COMMAND_TYPE_NOP, 0); +} + +int al_nand_cmd_seq_scion_dma( + struct al_nand_ctrl_obj *obj, + struct al_buf *tx_buff, + int trigger_interrupt, + int *num_transactions) +{ + int status = 0; + + struct al_buf transaction_dst_buf; + struct al_block transaction_src_block; + struct al_block transaction_dst_block; + struct al_raid_transaction transaction; + + al_assert(obj); + al_assert(tx_buff); + al_assert(num_transactions); + + transaction_src_block.bufs = tx_buff; + transaction_src_block.num = 1; + + transaction_dst_buf.addr = + (al_phys_addr_t)(uintptr_t)obj->cmd_buff_base; + transaction_dst_buf.len = tx_buff->len; + + transaction_dst_block.bufs = &transaction_dst_buf; + transaction_dst_block.num = 1; + + transaction.op = AL_RAID_OP_MEM_CPY; + + transaction.flags = + AL_RAID_BARRIER | (trigger_interrupt ? 
AL_RAID_INTERRUPT : 0); + + transaction.srcs_blocks = &transaction_src_block; + transaction.num_of_srcs = 1; + transaction.total_src_bufs = 1; + transaction.dsts_blocks = &transaction_dst_block; + transaction.num_of_dsts = 1; + transaction.total_dst_bufs = 1; + + if (0 != al_raid_dma_prepare( + obj->raid_dma, + obj->raid_dma_qid, + &transaction)) { + al_err("al_raid_dma_prepare failed!\n"); + status = -EIO; + goto done; + } + + if (0 != al_raid_dma_action( + obj->raid_dma, + obj->raid_dma_qid, + transaction.tx_descs_count)) { + al_err("al_raid_dma_action failed!\n"); + status = -EIO; + goto done; + } + + *num_transactions = 1; + +done: + + return status; +} + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_data_buff_read_dma( + struct al_nand_ctrl_obj *obj, + struct al_buf *buff, + int trigger_interrupt) +{ + int status = 0; + + struct al_buf transaction_src_buf; + struct al_block transaction_src_block; + struct al_block transaction_dst_block; + struct al_raid_transaction transaction; + + al_assert(obj); + al_assert(buff); + + transaction_dst_block.bufs = buff; + transaction_dst_block.num = 1; + + transaction_src_buf.addr = + (al_phys_addr_t)(uintptr_t)obj->data_buff_base; + transaction_src_buf.len = buff->len; + + transaction_src_block.bufs = &transaction_src_buf; + transaction_src_block.num = 1; + + transaction.op = AL_RAID_OP_MEM_CPY; + + transaction.flags = + AL_RAID_BARRIER | (trigger_interrupt ? AL_RAID_INTERRUPT : 0); + + transaction.srcs_blocks = &transaction_src_block; + transaction.num_of_srcs = 1; + transaction.total_src_bufs = 1; + transaction.dsts_blocks = &transaction_dst_block; + transaction.num_of_dsts = 1; + transaction.total_dst_bufs = 1; + + if (0 != al_raid_dma_prepare( + obj->raid_dma, + obj->raid_dma_qid, + &transaction)) { + al_err("al_raid_dma_prepare failed!\n"); + status = -EIO; + goto done; + } + + if (0 != al_raid_dma_action( + obj->raid_dma, + obj->raid_dma_qid, + transaction.tx_descs_count)) { + al_err("al_raid_dma_action failed!\n"); + status = -EIO; + goto done; + } + +done: + + return status; +} + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_data_buff_write_dma( + struct al_nand_ctrl_obj *obj, + struct al_buf *buff, + int trigger_interrupt) +{ + int status = 0; + + struct al_buf transaction_dst_buf; + struct al_block transaction_dst_block; + struct al_block transaction_src_block; + struct al_raid_transaction transaction; + + al_assert(obj); + al_assert(buff); + + transaction_src_block.bufs = buff; + transaction_src_block.num = 1; + + transaction_dst_buf.addr = + (al_phys_addr_t)(uintptr_t)obj->data_buff_base; + transaction_dst_buf.len = buff->len; + + transaction_dst_block.bufs = &transaction_dst_buf; + transaction_dst_block.num = 1; + + transaction.op = AL_RAID_OP_MEM_CPY; + + transaction.flags = + AL_RAID_BARRIER | (trigger_interrupt ? 
AL_RAID_INTERRUPT : 0); + + transaction.srcs_blocks = &transaction_src_block; + transaction.num_of_srcs = 1; + transaction.total_src_bufs = 1; + transaction.dsts_blocks = &transaction_dst_block; + transaction.num_of_dsts = 1; + transaction.total_dst_bufs = 1; + + if (0 != al_raid_dma_prepare( + obj->raid_dma, + obj->raid_dma_qid, + &transaction)) { + al_err("al_raid_dma_prepare failed!\n"); + status = -EIO; + goto done; + } + + if (0 != al_raid_dma_action( + obj->raid_dma, + obj->raid_dma_qid, + transaction.tx_descs_count)) { + al_err("al_raid_dma_action failed!\n"); + status = -EIO; + goto done; + } + +done: + + return status; +} + +/******************************************************************************/ +/******************************************************************************/ +int al_nand_transaction_completion( + struct al_nand_ctrl_obj *obj, + uint32_t *comp_status) +{ + int retVal; + + al_assert(obj); + + retVal = al_raid_dma_completion( + obj->raid_dma, + obj->raid_dma_qid, + comp_status); + + return retVal; +} + +/******************************************************************************/ +/******************************************************************************/ +static int _al_nand_reg_write_dma( + struct al_nand_ctrl_obj *obj, + uintptr_t reg_addr, + struct al_buf *tx_buff, + int trigger_interrupt) +{ + int status = 0; + + struct al_buf transaction_dst_buf; + struct al_block transaction_src_block; + struct al_block transaction_dst_block; + struct al_raid_transaction transaction; + + transaction_src_block.bufs = tx_buff; + transaction_src_block.num = 1; + + transaction_dst_buf.addr = (al_phys_addr_t)reg_addr; + transaction_dst_buf.len = tx_buff->len; + + transaction_dst_block.bufs = &transaction_dst_buf; + transaction_dst_block.num = 1; + + transaction.op = AL_RAID_OP_MEM_CPY; + + transaction.flags = + AL_RAID_BARRIER | (trigger_interrupt ? AL_RAID_INTERRUPT : 0); + + transaction.srcs_blocks = &transaction_src_block; + transaction.num_of_srcs = 1; + transaction.total_src_bufs = 1; + transaction.dsts_blocks = &transaction_dst_block; + transaction.num_of_dsts = 1; + transaction.total_dst_bufs = 1; + + if (0 != al_raid_dma_prepare( + obj->raid_dma, + obj->raid_dma_qid, + &transaction)) { + al_err("al_raid_dma_prepare failed!\n"); + status = -EIO; + goto done; + } + + if (0 != al_raid_dma_action( + obj->raid_dma, + obj->raid_dma_qid, + transaction.tx_descs_count)) { + al_err("al_raid_dma_action failed!\n"); + status = -EIO; + goto done; + } + +done: + + return status; +} diff --git a/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand_regs.h b/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand_regs.h new file mode 100644 index 00000000000000..9420fb6671098b --- /dev/null +++ b/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_hal_nand_regs.h @@ -0,0 +1,296 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. 
+ +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +#ifndef __AL_HAL_NAND_REGS_H__ +#define __AL_HAL_NAND_REGS_H__ + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +#define AL_NAND_CMD_FIFO_DEPTH 32 + +struct al_nand_regs { + uint32_t rsrvd0[0x44 / sizeof(uint32_t)]; + uint32_t flash_ctl_3; /* 0x0044 */ + uint32_t rsrvd1[(0x400 - 0x48) / sizeof(uint32_t)]; + uint32_t mode_select_reg; /* 0x0400 */ + uint32_t ctl_reg0; /* 0x0404 */ + uint32_t bch_ctrl_reg_0; /* 0x0408 */ + uint32_t bch_ctrl_reg_1; /* 0x040c */ + uint32_t command_reg; /* 0x0410 */ + uint32_t nflash_row_addr; /* 0x0414 */ + uint32_t nflash_col_addr; /* 0x0418 */ + uint32_t codeword_size_cnt_reg; /* 0x041c */ + uint32_t command_buffer_reg; /* 0x0420 */ + uint32_t data_buffer_reg; /* 0x0424 */ + uint32_t nflash_spare_offset; /* 0x0428 */ + uint32_t sdr_timing_params_0; /* 0x042c */ + uint32_t sdr_timing_params_1; /* 0x0430 */ + uint32_t ddr_timing_params_0; /* 0x0434 */ + uint32_t ddr_timing_params_1; /* 0x0438 */ + uint32_t ddr2_timing_params_0; /* 0x043c */ + uint32_t ddr2_timing_params_1; /* 0x0440 */ + uint32_t rdy_busy_wait_cnt_reg; /* 0x0444 */ + uint32_t nfc_int_en; /* 0x0448 */ + uint32_t rdy_busy_status_reg; /* 0x044c */ + uint32_t nfc_int_stat; /* 0x0450 */ + uint32_t flash_status_reg; /* 0x0454 */ + uint32_t ddr_timing_params_2; /* 0x0458 */ + uint32_t ddr2_timing_params_2; /* 0x045c */ + uint32_t reset_reg; /* 0x0460 */ + uint32_t xip_ctrl_reg_0; /* 0x0464 */ + uint32_t xip_ctrl_reg_1; /* 0x0468 */ + uint32_t xip_ctrl_reg_2; /* 0x046c */ + uint32_t clk_select_reg; /* 0x0470 */ + uint32_t bch_status_reg; /* 0x0474 */ + uint32_t dma_sector_addr_reg; /* 0x0478 */ + uint32_t dma_addr_reg; /* 0x047c */ + uint32_t dma_ctrl_reg; /* 0x0480 */ + uint32_t xip_ctrl_reg_3; /* 0x0484 */ + uint32_t reset_status_reg; /* 0x048c */ +}; + +struct al_nand_wrap_regs { + uint32_t rsrvd0; + uint32_t code_word_size; /* 0x0004 */ +}; + +/* Register fields: flash_ctl_3 */ +#define 
AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_MASK(idx) \ + AL_FIELD_MASK(((idx) * 2 + 1), ((idx) * 2)) +#define AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_SHIFT(idx) \ + ((idx) * 2) +#define AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_VAL_NUM_BNKS 6 +#define AL_NAND_FLASH_CTL_3_FLASH_TYPE_BNK_VAL_NAND 1 + +/* Register fields: mode_select_reg */ +#define AL_NAND_MODE_SELECT_MODE_SELECT_MASK AL_FIELD_MASK(2, 0) +#define AL_NAND_MODE_SELECT_MODE_SELECT_SHIFT 0 + +#define AL_NAND_MODE_SELECT_SDR 0 + +#define AL_NAND_MODE_SELECT_SDR_TIM_MODE_MASK AL_FIELD_MASK(6, 3) +#define AL_NAND_MODE_SELECT_SDR_TIM_MODE_SHIFT 3 + +#define AL_NAND_MODE_SELECT_DDR_TIM_MODE_MASK AL_FIELD_MASK(10, 7) +#define AL_NAND_MODE_SELECT_DDR_TIM_MODE_SHIFT 7 + +#define AL_NAND_MODE_SELECT_DDR2_TIM_MODE_MASK AL_FIELD_MASK(14, 11) +#define AL_NAND_MODE_SELECT_DDR2_TIM_MODE_SHIFT 11 + +/* Register fields: ctl_reg0 */ +#define AL_NAND_CTL_REG0_CS1_MASK AL_FIELD_MASK(2, 0) +#define AL_NAND_CTL_REG0_CS1_SHIFT 0 + +#define AL_NAND_CTL_REG0_WP 3 + +#define AL_NAND_CTL_REG0_DQ_WIDTH 4 + +#define AL_NAND_CTL_REG0_COL_ADDR_CYCLES_MASK AL_FIELD_MASK(8, 5) +#define AL_NAND_CTL_REG0_COL_ADDR_CYCLES_SHIFT 5 + +#define AL_NAND_CTL_REG0_ROW_ADDR_CYCLES_MASK AL_FIELD_MASK(12, 9) +#define AL_NAND_CTL_REG0_ROW_ADDR_CYCLES_SHIFT 9 + +#define AL_NAND_CTL_REG0_PAGE_SIZE_MASK AL_FIELD_MASK(15, 13) +#define AL_NAND_CTL_REG0_PAGE_SIZE_SHIFT 13 + +#define AL_NAND_CTL_REG0_TX_MODE 16 +#define AL_NAND_CTL_REG0_TX_MODE_VAL_RX 0 +#define AL_NAND_CTL_REG0_TX_MODE_VAL_TX 1 + +#define AL_NAND_CTL_REG0_CS2_MASK AL_FIELD_MASK(21, 19) +#define AL_NAND_CTL_REG0_CS2_SHIFT 19 + +/* Register fields: bch_ctrl_reg_0 */ +#define AL_NAND_BCH_CTRL_REG_0_ECC_ON_OFF 0 + +#define AL_NAND_BCH_CTRL_REG_0_ECC_ALGORITHM 1 + +#define AL_NAND_BCH_CTRL_REG_0_BCH_T_MASK AL_FIELD_MASK(5, 2) +#define AL_NAND_BCH_CTRL_REG_0_BCH_T_SHIFT 2 + +#define AL_NAND_BCH_CTRL_REG_0_BCH_N_MASK AL_FIELD_MASK(21, 6) +#define AL_NAND_BCH_CTRL_REG_0_BCH_N_SHIFT 6 + +/* Register fields: bch_ctrl_reg_1 */ +#define AL_NAND_BCH_CTRL_REG_1_BCH_K_MASK AL_FIELD_MASK(12, 0) +#define AL_NAND_BCH_CTRL_REG_1_BCH_K_SHIFT 0 + +/* Register fields: codeword_size_cnt_reg */ +#define AL_NAND_CODEWORD_SIZE_CNT_REG_SIZE_MASK AL_FIELD_MASK(15, 0) +#define AL_NAND_CODEWORD_SIZE_CNT_REG_SIZE_SHIFT 0 + +#define AL_NAND_CODEWORD_SIZE_CNT_REG_COUNT_MASK AL_FIELD_MASK(31, 16) +#define AL_NAND_CODEWORD_SIZE_CNT_REG_COUNT_SHIFT 16 + +/* Register fields: nflash_spare_offset */ +#define AL_NAND_NFLASH_SPR_OFF_SPR_OFF_MASK AL_FIELD_MASK(15, 0) +#define AL_NAND_NFLASH_SPR_OFF_SPR_OFF_SHIFT 0 + +/* Register fields: sdr_timing_params_0 */ +#define AL_NAND_SDR_TIM_PARAMS_0_T_SETUP_MASK AL_FIELD_MASK(5, 0) +#define AL_NAND_SDR_TIM_PARAMS_0_T_SETUP_SHIFT 0 + +#define AL_NAND_SDR_TIM_PARAMS_0_T_HOLD_MASK AL_FIELD_MASK(11, 6) +#define AL_NAND_SDR_TIM_PARAMS_0_T_HOLD_SHIFT 6 + +#define AL_NAND_SDR_TIM_PARAMS_0_T_WH_MASK AL_FIELD_MASK(17, 12) +#define AL_NAND_SDR_TIM_PARAMS_0_T_WH_SHIFT 12 + +#define AL_NAND_SDR_TIM_PARAMS_0_T_WRP_MASK AL_FIELD_MASK(23, 18) +#define AL_NAND_SDR_TIM_PARAMS_0_T_WRP_SHIFT 18 + +#define AL_NAND_SDR_TIM_PARAMS_0_T_INTCMD_MASK AL_FIELD_MASK(29, 24) +#define AL_NAND_SDR_TIM_PARAMS_0_T_INTCMD_SHIFT 24 + +/* Register fields: sdr_timing_params_1 */ +#define AL_NAND_SDR_TIM_PARAMS_1_T_RR_MASK AL_FIELD_MASK(5, 0) +#define AL_NAND_SDR_TIM_PARAMS_1_T_RR_SHIFT 0 + +#define AL_NAND_SDR_TIM_PARAMS_1_T_WB_MASK AL_FIELD_MASK(11, 6) +#define AL_NAND_SDR_TIM_PARAMS_1_T_WB_SHIFT 6 +#define AL_NAND_SDR_TIM_PARAMS_1_T_WB_WIDTH 6 + +#define 
AL_NAND_SDR_TIM_PARAMS_1_T_READ_DLY_MASK AL_FIELD_MASK(13, 12) +#define AL_NAND_SDR_TIM_PARAMS_1_T_READ_DLY_SHIFT 12 + +#define AL_NAND_SDR_TIM_PARAMS_1_T_WB_MSB_MASK AL_FIELD_MASK(14, 14) +#define AL_NAND_SDR_TIM_PARAMS_1_T_WB_MSB_SHIFT 14 + +/* Register fields: ddr_timing_params_0 */ +#define AL_NAND_DDR_TIM_PARAMS_0_T_SETUP_MASK AL_FIELD_MASK(5, 0) +#define AL_NAND_DDR_TIM_PARAMS_0_T_SETUP_SHIFT 0 + +#define AL_NAND_DDR_TIM_PARAMS_0_T_HOLD_MASK AL_FIELD_MASK(11, 6) +#define AL_NAND_DDR_TIM_PARAMS_0_T_HOLD_SHIFT 6 + +#define AL_NAND_DDR_TIM_PARAMS_0_T_WH_MASK AL_FIELD_MASK(17, 12) +#define AL_NAND_DDR_TIM_PARAMS_0_T_WH_SHIFT 12 + +#define AL_NAND_DDR_TIM_PARAMS_0_T_WRP_MASK AL_FIELD_MASK(23, 18) +#define AL_NAND_DDR_TIM_PARAMS_0_T_WRP_SHIFT 18 + +#define AL_NAND_DDR_TIM_PARAMS_0_T_INTCMD_MASK AL_FIELD_MASK(29, 24) +#define AL_NAND_DDR_TIM_PARAMS_0_T_INTCMD_SHIFT 24 + +/* Register fields: ddr_timing_params_1 */ +#define AL_NAND_DDR_TIM_PARAMS_1_T_RR_MASK AL_FIELD_MASK(5, 0) +#define AL_NAND_DDR_TIM_PARAMS_1_T_RR_SHIFT 0 + +#define AL_NAND_DDR_TIM_PARAMS_1_T_WB_MASK AL_FIELD_MASK(11, 6) +#define AL_NAND_DDR_TIM_PARAMS_1_T_WB_SHIFT 6 + +#define AL_NAND_DDR_TIM_PARAMS_1_T_CLKDIV_MASK AL_FIELD_MASK(17, 12) +#define AL_NAND_DDR_TIM_PARAMS_1_T_CLKDIV_SHIFT 12 + +#define AL_NAND_DDR_TIM_PARAMS_1_T_CE_N_MASK AL_FIELD_MASK(23, 18) +#define AL_NAND_DDR_TIM_PARAMS_1_T_CE_N_SHIFT 18 + +#define AL_NAND_DDR_TIM_PARAMS_1_T_DQS_IN_MASK AL_FIELD_MASK(28, 24) +#define AL_NAND_DDR_TIM_PARAMS_1_T_DQS_IN_SHIFT 24 + +/* Register fields: ddr2_timing_params_0 */ +#define AL_NAND_DDR2_TIM_PARAMS_0_T_SETUP_MASK AL_FIELD_MASK(5, 0) +#define AL_NAND_DDR2_TIM_PARAMS_0_T_SETUP_SHIFT 0 + +#define AL_NAND_DDR2_TIM_PARAMS_0_T_HOLD_MASK AL_FIELD_MASK(11, 6) +#define AL_NAND_DDR2_TIM_PARAMS_0_T_HOLD_SHIFT 6 + +#define AL_NAND_DDR2_TIM_PARAMS_0_T_WH_MASK AL_FIELD_MASK(17, 12) +#define AL_NAND_DDR2_TIM_PARAMS_0_T_WH_SHIFT 12 + +#define AL_NAND_DDR2_TIM_PARAMS_0_T_WRP_MASK AL_FIELD_MASK(23, 18) +#define AL_NAND_DDR2_TIM_PARAMS_0_T_WRP_SHIFT 18 + +#define AL_NAND_DDR2_TIM_PARAMS_0_T_INTCMD_MASK AL_FIELD_MASK(29, 24) +#define AL_NAND_DDR2_TIM_PARAMS_0_T_INTCMD_SHIFT 24 + +/* Register fields: ddr2_timing_params_1 */ +#define AL_NAND_DDR2_TIM_PARAMS_1_T_RR_MASK AL_FIELD_MASK(5, 0) +#define AL_NAND_DDR2_TIM_PARAMS_1_T_RR_SHIFT 0 + +#define AL_NAND_DDR2_TIM_PARAMS_1_T_WB_MASK AL_FIELD_MASK(11, 6) +#define AL_NAND_DDR2_TIM_PARAMS_1_T_WB_SHIFT 6 + +#define AL_NAND_DDR2_TIM_PARAMS_1_T_CLKDIV_MASK AL_FIELD_MASK(17, 12) +#define AL_NAND_DDR2_TIM_PARAMS_1_T_CLKDIV_SHIFT 12 + +#define AL_NAND_DDR2_TIM_PARAMS_1_T_CE_N_MASK AL_FIELD_MASK(23, 18) +#define AL_NAND_DDR2_TIM_PARAMS_1_T_CE_N_SHIFT 18 + +#define AL_NAND_DDR2_TIM_PARAMS_1_T_DQS_IN_MASK AL_FIELD_MASK(28, 24) +#define AL_NAND_DDR2_TIM_PARAMS_1_T_DQS_IN_SHIFT 24 + +/* Register fields: rdy_busy_wait_cnt_reg */ +#define AL_NAND_RDYBSY_WAIT_CNT_REG_RDY_TOUT_CNT_MASK AL_FIELD_MASK(15, 0) +#define AL_NAND_RDYBSY_WAIT_CNT_REG_RDY_TOUT_CNT_SHIFT 0 + +#define AL_NAND_RDYBSY_WAIT_CNT_REG_RDYBSYEN_MASK AL_FIELD_MASK(16, 16) +#define AL_NAND_RDYBSY_WAIT_CNT_REG_RDYBSYEN_SHIFT 16 + +/* Register fields: rdy_busy_status_reg */ +#define AL_NAND_RDYBSY_STATUS_REG_RDYBSY_STATUS_MASK AL_FIELD_MASK(7, 0) +#define AL_NAND_RDYBSY_STATUS_REG_RDYBSY_STATUS_SHIFT 0 + +/* Register fields: nfc_int_stat */ +#define AL_NAND_NFC_INT_STAT_CMD_BUF_EMPTY 0 +#define AL_NAND_NFC_INT_STAT_CMD_BUF_FULL 1 +#define AL_NAND_NFC_INT_STAT_DATA_BUF_EMPTY 2 +#define AL_NAND_NFC_INT_STAT_DATA_BUF_FULL 3 +#define 
AL_NAND_NFC_INT_STAT_CORR_ERR 4
+#define AL_NAND_NFC_INT_STAT_UNCORR_ERR 5
+#define AL_NAND_NFC_INT_STAT_BUF_WR_RDY 6
+#define AL_NAND_NFC_INT_STAT_BUF_RD_RDY 7
+
+/* Register fields: ddr_timing_params_2 */
+#define AL_NAND_DDR_TIM_PARAMS_2_T_DQS_OUT_MASK AL_FIELD_MASK(4, 0)
+#define AL_NAND_DDR_TIM_PARAMS_2_T_DQS_OUT_SHIFT 0
+
+/* Register fields: ddr2_timing_params_2 */
+#define AL_NAND_DDR2_TIM_PARAMS_2_T_DQS_OUT_MASK AL_FIELD_MASK(4, 0)
+#define AL_NAND_DDR2_TIM_PARAMS_2_T_DQS_OUT_SHIFT 0
+
+/* Register fields: reset_status_reg */
+#define AL_NAND_RESET_STATUS_REG_TIM_ENG_RST_DN 0
+
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+#endif /* __AL_HAL_NAND_REGS_H__ */
diff --git a/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_nand.c b/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_nand.c
new file mode 100644
index 00000000000000..ddc0a91312cb9e
--- /dev/null
+++ b/target/linux/alpine/files/drivers/mtd/nand/raw/al/al_nand.c
@@ -0,0 +1,1004 @@
+/*
+ * Annapurna Labs NAND driver.
+ *
+ * Copyright (C) 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * TODO:
+ * - add sysfs statistics
+ * - use DMA for reading/writing
+ * - get config parameters from device tree instead of config registers
+ * - use correct ECC size and not entire OOB
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#include "al_hal_nand.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Annapurna Labs");
+
+#define WAIT_EMPTY_CMD_FIFO_TIME_OUT 1000000
+#define AL_NAND_NAME "al-nand"
+#define AL_NAND_MAX_ONFI_TIMING_MODE 1
+
+#define AL_NAND_MAX_BIT_FLIPS 4
+#define AL_NAND_MAX_OOB_SIZE SZ_1K
+
+#define NAND_SET_FEATURES_ADDR 0xfa
+
+#define ONFI_COL_ADDR_CYCLE_MASK 0xf0
+#define ONFI_COL_ADDR_CYCLE_POS 4
+#define ONFI_ROW_ADDR_CYCLE_MASK 0x0f
+#define ONFI_ROW_ADDR_CYCLE_POS 0
+
+#define AL_NAND_MAX_CHIPS 4
+#define AL_NAND_ECC_SUPPORT
+
+static const char *probes[] = { "cmdlinepart", "ofpart", NULL };
+
+struct nand_data {
+	struct nand_chip chip;
+	struct nand_controller controller;
+	struct mtd_info mtd;
+	struct platform_device *pdev;
+
+	struct al_nand_ctrl_obj nand_obj;
+	uint8_t word_cache[4];
+	int cache_pos;
+	uint32_t cw_size;
+	struct al_nand_dev_properties device_properties;
+	struct al_nand_extra_dev_properties dev_ext_props;
+	struct al_nand_ecc_config ecc_config;
+
+	/*** interrupts ***/
+	struct completion complete;
+	spinlock_t irq_lock;
+	uint32_t irq_status;
+	int irq;
+
+	uint8_t oob[AL_NAND_MAX_OOB_SIZE];
+
+	uint32_t s_ecc_loc;
+	uint32_t s_oob_size;
+	uint32_t s_ecc_oob_bytes;
+};
+
+/*
+ * Addressing RMN: 2903
+ *
+ * RMN description:
+ * NAND timing parameters that are used in the non-manual mode are wrong and
+ * reduce performance.
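+ * (For scale: the NAND_CLK_CYCLES() macro below converts nanoseconds to
+ * controller cycles by dividing by 2.666, i.e. it assumes one cycle every
+ * ~2.666 ns, a roughly 375 MHz controller clock; a 27 ns tWRP, for
+ * example, truncates to 10 cycles.)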
+ * Replacing with the manual parameters to increase speed + */ +#define AL_NAND_NSEC_PER_CLK_CYCLES 2.666 +#define NAND_CLK_CYCLES(nsec) ((nsec) / (AL_NAND_NSEC_PER_CLK_CYCLES)) + +const struct al_nand_device_timing al_nand_manual_timing[] = { + { + .tSETUP = NAND_CLK_CYCLES(14), + .tHOLD = NAND_CLK_CYCLES(22), + .tWRP = NAND_CLK_CYCLES(54), + .tRR = NAND_CLK_CYCLES(43), + .tWB = NAND_CLK_CYCLES(206), + .tWH = NAND_CLK_CYCLES(32), + .tINTCMD = NAND_CLK_CYCLES(86), + .readDelay = NAND_CLK_CYCLES(3) + }, + { + .tSETUP = NAND_CLK_CYCLES(14), + .tHOLD = NAND_CLK_CYCLES(14), + .tWRP = NAND_CLK_CYCLES(27), + .tRR = NAND_CLK_CYCLES(22), + .tWB = NAND_CLK_CYCLES(104), + .tWH = NAND_CLK_CYCLES(19), + .tINTCMD = NAND_CLK_CYCLES(43), + .readDelay = NAND_CLK_CYCLES(3) + }, + { + .tSETUP = NAND_CLK_CYCLES(14), + .tHOLD = NAND_CLK_CYCLES(14), + .tWRP = NAND_CLK_CYCLES(19), + .tRR = NAND_CLK_CYCLES(22), + .tWB = NAND_CLK_CYCLES(104), + .tWH = NAND_CLK_CYCLES(16), + .tINTCMD = NAND_CLK_CYCLES(43), + .readDelay = NAND_CLK_CYCLES(3) + }, + { + .tSETUP = NAND_CLK_CYCLES(14), + .tHOLD = NAND_CLK_CYCLES(14), + .tWRP = NAND_CLK_CYCLES(16), + .tRR = NAND_CLK_CYCLES(22), + .tWB = NAND_CLK_CYCLES(104), + .tWH = NAND_CLK_CYCLES(11), + .tINTCMD = NAND_CLK_CYCLES(27), + .readDelay = NAND_CLK_CYCLES(3) + }, + { + .tSETUP = NAND_CLK_CYCLES(14), + .tHOLD = NAND_CLK_CYCLES(14), + .tWRP = NAND_CLK_CYCLES(14), + .tRR = NAND_CLK_CYCLES(22), + .tWB = NAND_CLK_CYCLES(104), + .tWH = NAND_CLK_CYCLES(11), + .tINTCMD = NAND_CLK_CYCLES(27), + .readDelay = NAND_CLK_CYCLES(3) + }, + { + .tSETUP = NAND_CLK_CYCLES(14), + .tHOLD = NAND_CLK_CYCLES(14), + .tWRP = NAND_CLK_CYCLES(14), + .tRR = NAND_CLK_CYCLES(22), + .tWB = NAND_CLK_CYCLES(104), + .tWH = NAND_CLK_CYCLES(11), + .tINTCMD = NAND_CLK_CYCLES(27), + .readDelay = NAND_CLK_CYCLES(3) + } +}; + + +static inline struct nand_data *nand_data_get(struct mtd_info *mtd) +{ + struct nand_chip *nand_chip = mtd_to_nand(mtd); + + return nand_get_controller_data(nand_chip); +} + +static void nand_cw_size_get(int num_bytes, uint32_t *cw_size, uint32_t *cw_count) +{ + num_bytes = AL_ALIGN_UP(num_bytes, 4); + + if (num_bytes < *cw_size) + *cw_size = num_bytes; + + if (0 != (num_bytes % *cw_size)) + *cw_size = num_bytes / 4; + + BUG_ON(num_bytes % *cw_size); + + *cw_count = num_bytes / *cw_size; +} + +static void nand_send_byte_count_command(struct al_nand_ctrl_obj *nand_obj, + enum al_nand_command_type cmd_id, + uint16_t len) +{ + uint32_t cmd; + + cmd = AL_NAND_CMD_SEQ_ENTRY(cmd_id, (len & 0xff)); + + al_nand_cmd_single_execute(nand_obj, cmd); + + cmd = AL_NAND_CMD_SEQ_ENTRY(cmd_id, ((len & 0xff00) >> 8)); + + al_nand_cmd_single_execute(nand_obj, cmd); +} + +static void nand_wait_cmd_fifo_empty(struct nand_data *nand) +{ + uint32_t i = WAIT_EMPTY_CMD_FIFO_TIME_OUT; + int cmd_buff_empty; + + while (i > 0) { + cmd_buff_empty = al_nand_cmd_buff_is_empty(&nand->nand_obj); + if (cmd_buff_empty) + break; + + udelay(1); + i--; + } + + if (i == 0) + dev_err(&nand->pdev->dev, + "%s: waited for empty cmd fifo for more than a sec!\n", + __func__); +} + +static void nand_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl) +{ + struct mtd_info *mtd; + struct nand_data *nand; + enum al_nand_command_type type; + uint32_t cmd; + + mtd = nand_to_mtd(chip); + nand = nand_data_get(mtd); + + dev_dbg(&nand->pdev->dev, "%s: dat 0x%x ctrl 0x%x\n", + __func__, dat, ctrl); + + if ((ctrl & (NAND_CLE | NAND_ALE)) == 0) { + dev_dbg(&nand->pdev->dev, "%s: drop cmd ctrl\n", __func__); + return; + } + + 
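+	/* any new command or address byte invalidates the 4-byte read cache
+	 * used by nand_read_byte_from_fifo() */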
nand->cache_pos = -1; + + type = ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE) ? + AL_NAND_COMMAND_TYPE_CMD : + AL_NAND_COMMAND_TYPE_ADDRESS; + cmd = AL_NAND_CMD_SEQ_ENTRY(type, (dat & 0xff)); + + dev_dbg(&nand->pdev->dev, "%s: type 0x%x cmd 0x%x\n", + __func__, type, cmd); + + al_nand_cmd_single_execute(&nand->nand_obj, cmd); + + nand_wait_cmd_fifo_empty(nand); + + if ((dat == NAND_CMD_PAGEPROG) && (ctrl & NAND_CLE)) { + cmd = AL_NAND_CMD_SEQ_ENTRY(AL_NAND_COMMAND_TYPE_WAIT_FOR_READY, 0); + + dev_dbg(&nand->pdev->dev, "%s: pageprog cmd = 0x%x\n", + __func__, cmd); + + al_nand_cmd_single_execute(&nand->nand_obj, cmd); + + nand_wait_cmd_fifo_empty(nand); + + al_nand_wp_set_enable(&nand->nand_obj, 1); + al_nand_tx_set_enable(&nand->nand_obj, 0); + } +} + +static void nand_dev_select(struct nand_chip *chip, int chipnr) +{ + struct mtd_info *mtd; + struct nand_data *nand; + + mtd = nand_to_mtd(chip); + nand = nand_data_get(mtd); + + dev_dbg(&nand->pdev->dev, "%s: chipnr %d\n", + __func__, chipnr); + + if (chipnr < 0) + return; + + al_nand_dev_select(&nand->nand_obj, chipnr); +} + +static int nand_dev_ready(struct nand_chip *chip) +{ + struct mtd_info *mtd; + struct nand_data *nand; + int is_ready = 0; + + mtd = nand_to_mtd(chip); + nand = nand_data_get(mtd); + + is_ready = al_nand_dev_is_ready(&nand->nand_obj); + + dev_dbg(&nand->pdev->dev, "%s: is_ready %d\n", + __func__, is_ready); + + return is_ready; +} + +/* + * read len bytes from the nand device. + */ +static void nand_read_buff(struct nand_chip *chip, uint8_t *buf, int len) +{ + uint32_t cw_size; + uint32_t cw_count; + struct mtd_info *mtd; + struct nand_data *nand; + unsigned long timeout; + + mtd = nand_to_mtd(chip); + nand = nand_data_get(mtd); + + dev_dbg(&nand->pdev->dev, "%s: len %d\n", __func__, len); + + cw_size = nand->cw_size; + + BUG_ON(len & 3); + BUG_ON(nand->cache_pos != -1); + + nand_cw_size_get(len, &cw_size, &cw_count); + + dev_dbg(&nand->pdev->dev, "%s: cw_size %d cw_count %d\n", + __func__, cw_size, cw_count); + + al_nand_cw_config(&nand->nand_obj, cw_size, cw_count); + + while (cw_count--) + nand_send_byte_count_command(&nand->nand_obj, + AL_NAND_COMMAND_TYPE_DATA_READ_COUNT, + cw_size); + + reinit_completion(&nand->complete); + + while (len > 0) { + dev_dbg(&nand->pdev->dev, "%s: waiting for read to become ready, len %d\n", + __func__, len); + + al_nand_int_enable(&nand->nand_obj, AL_NAND_INTR_STATUS_BUF_RDRDY); + timeout = wait_for_completion_timeout(&nand->complete, HZ); + al_nand_int_disable(&nand->nand_obj, AL_NAND_INTR_STATUS_BUF_RDRDY); + + if (!timeout) { + /* timeout */ + dev_err(&nand->pdev->dev, "%s: timeout occurred, len %d\n", + __func__, len); + break; + } + + dev_dbg(&nand->pdev->dev, "%s: read ready\n", __func__); + + al_nand_data_buff_read(&nand->nand_obj, cw_size, 0, 0, buf); + + buf += cw_size; + len -= cw_size; + } +} + +/* + * read byte from the device. + * read byte is not supported by the controller so this function reads + * 4 bytes as a cache and use it in the next calls. 
+ */ +static uint8_t nand_read_byte_from_fifo(struct nand_chip *chip) +{ + struct mtd_info *mtd; + struct nand_data *nand; + uint8_t ret_val; + + mtd = nand_to_mtd(chip); + nand = nand_data_get(mtd); + + dev_dbg(&nand->pdev->dev, "%s: cache_pos %d", + __func__, nand->cache_pos); + + if (nand->cache_pos == -1) { + nand_read_buff(chip, nand->word_cache, 4); + nand->cache_pos = 0; + } + + ret_val = nand->word_cache[nand->cache_pos]; + nand->cache_pos++; + if (nand->cache_pos == 4) + nand->cache_pos = -1; + + dev_dbg(&nand->pdev->dev, "%s: ret_val = 0x%x\n", + __func__, ret_val); + + return ret_val; +} + +/* + * writing buffer to the nand device. + * this func will wait for the write to be complete + */ +static void nand_write_buff(struct nand_chip *chip, const uint8_t *buf, int len) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + uint32_t cw_size = mtd_to_nand(mtd)->ecc.size; + uint32_t cw_count; + struct nand_data *nand; + void __iomem *data_buff; + unsigned long timeout; + + nand = nand_data_get(mtd); + + dev_dbg(&nand->pdev->dev, "%s: cw_size %d len %d start: 0x%x%x%x\n", + __func__, cw_size, len, buf[0], buf[1], buf[2]); + + al_nand_tx_set_enable(&nand->nand_obj, 1); + al_nand_wp_set_enable(&nand->nand_obj, 0); + + nand_cw_size_get(len, &cw_size, &cw_count); + + dev_dbg(&nand->pdev->dev, "%s: cw_size %d cw_count %d\n", + __func__, cw_size, cw_count); + + al_nand_cw_config(&nand->nand_obj, cw_size, cw_count); + + while (cw_count--) + nand_send_byte_count_command(&nand->nand_obj, + AL_NAND_COMMAND_TYPE_DATA_WRITE_COUNT, + cw_size); + + reinit_completion(&nand->complete); + + while (len > 0) { + dev_dbg(&nand->pdev->dev, "%s: waiting for write to become ready, len %d\n", + __func__, len); + + al_nand_int_enable(&nand->nand_obj, AL_NAND_INTR_STATUS_BUF_WRRDY); + timeout = wait_for_completion_timeout(&nand->complete, HZ); + al_nand_int_disable(&nand->nand_obj, AL_NAND_INTR_STATUS_BUF_WRRDY); + + if (!timeout) { + /* timeout */ + dev_err(&nand->pdev->dev, "%s: timeout occurred, len %d\n", + __func__, len); + break; + } + + dev_dbg(&nand->pdev->dev, "%s: write ready\n", __func__); + + data_buff = al_nand_data_buff_base_get(&nand->nand_obj); + memcpy(data_buff, buf, cw_size); + + buf += cw_size; + len -= cw_size; + } + + /* enable wp and disable tx will be executed after commands + * NAND_CMD_PAGEPROG and AL_NAND_COMMAND_TYPE_WAIT_FOR_READY will be + * sent to make sure all data were written. + */ +} + +/******************************************************************************/ +/**************************** ecc functions ***********************************/ +/******************************************************************************/ +#ifdef AL_NAND_ECC_SUPPORT + +static inline int is_empty_oob(uint8_t *oob, int len) +{ + int flips = 0; + int i; + int j; + + for (i = 0; i < len; i++) { + if (oob[i] == 0xff) + continue; + + for (j = 0; j < 8; j++) { + if ((oob[i] & BIT(j)) == 0) { + flips++; + if (flips >= AL_NAND_MAX_BIT_FLIPS) + break; + } + } + } + + if (flips < AL_NAND_MAX_BIT_FLIPS) + return 1; + + return 0; +} +/* + * read page with HW ecc support (corrected and uncorrected stat will be + * updated). 
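+ *
+ * Flow: the ECC bytes stored in the spare area are first loaded into the
+ * controller (READOOB at the ECC offset, followed by a spare-read count),
+ * the column pointer is then rewound to the page start (RNDOUT), and the
+ * page data is read back through the correction engine.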
+ */
+static int ecc_read_page(struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+	struct mtd_info *mtd;
+	struct nand_data *nand;
+	int uncorr_err_count = 0;
+	int corr_err_count = 0;
+	int ret;
+
+	BUG_ON(oob_required);
+
+	mtd = nand_to_mtd(chip);
+	nand = nand_data_get(mtd);
+
+	dev_dbg(&nand->pdev->dev, "%s: oob_required %d page %d\n",
+		__func__, oob_required, page);
+
+	/* Clear TX/RX ECC state machine */
+	al_nand_tx_set_enable(&nand->nand_obj, 1);
+	al_nand_tx_set_enable(&nand->nand_obj, 0);
+
+	al_nand_uncorr_err_clear(&nand->nand_obj);
+	al_nand_corr_err_clear(&nand->nand_obj);
+
+	al_nand_ecc_set_enabled(&nand->nand_obj, 1);
+
+	/* First need to read the OOB to the controller to calc the ecc */
+	chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, nand->s_ecc_loc, page);
+
+	nand_send_byte_count_command(&nand->nand_obj,
+			AL_NAND_COMMAND_TYPE_SPARE_READ_COUNT,
+			nand->s_ecc_oob_bytes);
+
+	/* move to the start of the page to read the data */
+	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, 0x00, -1);
+
+	/* read the buffer (after ecc correction) */
+	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+
+	uncorr_err_count = al_nand_uncorr_err_get(&nand->nand_obj);
+	corr_err_count = al_nand_corr_err_get(&nand->nand_obj);
+
+	al_nand_ecc_set_enabled(&nand->nand_obj, 0);
+
+	/* update statistics */
+	if (uncorr_err_count != 0) {
+		bool uncorr_err = true;
+
+		if (nand->ecc_config.algorithm == AL_NAND_ECC_ALGORITHM_BCH) {
+			/* the BCH ECC engine reports uncorrectable errors
+			 * when reading an erased (empty) page.
+			 * to avoid error messages and failures in the upper
+			 * layer, don't update the statistics in this case */
+			chip->legacy.read_buf(chip, nand->oob, mtd->oobsize);
+
+			if (is_empty_oob(nand->oob, mtd->oobsize))
+				uncorr_err = false;
+		}
+
+		if (uncorr_err) {
+			mtd->ecc_stats.failed++;
+			dev_err(&nand->pdev->dev,
+				"%s: uncorrectable errors found in page %d (failed count increased to %d)\n",
+				__func__, page, mtd->ecc_stats.failed);
+		}
+	}
+
+	if (corr_err_count != 0) {
+		mtd->ecc_stats.corrected++;
+		dev_dbg(&nand->pdev->dev, "%s: corrected increased\n",
+			__func__);
+	}
+
+	dev_dbg(&nand->pdev->dev, "%s: total corrected %d\n",
+		__func__, mtd->ecc_stats.corrected);
+
+	return ret;
+}
+
+static int ecc_read_subpage(struct nand_chip *chip, uint32_t offs, uint32_t len, uint8_t *buf, int page)
+{
+	struct mtd_info *mtd;
+	struct nand_data *nand;
+
+	mtd = nand_to_mtd(chip);
+	nand = nand_data_get(mtd);
+
+	dev_dbg(&nand->pdev->dev, "read subpage not supported!\n");
+	return -1;
+}
+/*
+ * program page with HW ecc support.
+ * this function is called after the commands and address for this page
+ * have been sent.
+ */
+static int ecc_write_page(struct nand_chip *chip, const uint8_t *buf, int oob_required, int page)
+{
+	struct mtd_info *mtd;
+	struct nand_data *nand;
+	uint32_t cmd;
+
+	BUG_ON(oob_required);
+
+	mtd = nand_to_mtd(chip);
+	nand = nand_data_get(mtd);
+
+	dev_dbg(&nand->pdev->dev, "%s: page %d writesize %d ecc_loc %d ecc_oob_bytes %d\n",
+		__func__, page, mtd->writesize, nand->s_ecc_loc, nand->s_ecc_oob_bytes);
+
+	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+	al_nand_ecc_set_enabled(&nand->nand_obj, 1);
+
+	nand_write_buff(chip, buf, mtd->writesize);
+
+	/* move the column pointer to the ECC location in the OOB so the
+	 * controller can write out the ECC bytes it calculated */
+	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN,
+			mtd->writesize + nand->s_ecc_loc, -1);
+
+	cmd = AL_NAND_CMD_SEQ_ENTRY(AL_NAND_COMMAND_TYPE_WAIT_CYCLE_COUNT, 0);
+
+	al_nand_tx_set_enable(&nand->nand_obj, 1);
+	al_nand_wp_set_enable(&nand->nand_obj, 0);
+
+	al_nand_cmd_single_execute(&nand->nand_obj, cmd);
+
+	nand_send_byte_count_command(&nand->nand_obj,
+			AL_NAND_COMMAND_TYPE_SPARE_WRITE_COUNT,
+			nand->s_ecc_oob_bytes);
+
+	nand_wait_cmd_fifo_empty(nand);
+
+	al_nand_wp_set_enable(&nand->nand_obj, 1);
+	al_nand_tx_set_enable(&nand->nand_obj, 0);
+
+	al_nand_ecc_set_enabled(&nand->nand_obj, 0);
+
+	return nand_prog_page_end_op(chip);
+}
+
+#endif /* #ifdef AL_NAND_ECC_SUPPORT */
+
+/******************************************************************************/
+/****************************** interrupts ************************************/
+/******************************************************************************/
+static irqreturn_t al_nand_isr(int irq, void *dev_id);
+
+static void nand_interrupt_init(struct nand_data *nand)
+{
+	int ret;
+
+	init_completion(&nand->complete);
+	spin_lock_init(&nand->irq_lock);
+	nand->irq_status = 0;
+	al_nand_int_disable(&nand->nand_obj, 0xffff);
+	al_nand_int_clear(&nand->nand_obj, 0xffff);
+
+	ret = request_irq(nand->irq, al_nand_isr, IRQF_SHARED, AL_NAND_NAME, nand);
+	if (ret)
+		dev_err(&nand->pdev->dev, "%s: failed to request irq %d (%d)\n",
+			__func__, nand->irq, ret);
+}
+/*
+ * ISR for nand interrupts - save the interrupt status, disable these
+ * interrupts, and notify the waiting process.
+ */ +static irqreturn_t al_nand_isr(int irq, void *dev_id) +{ + struct nand_data *nand = dev_id; + + nand->irq_status = al_nand_int_status_get(&nand->nand_obj); + al_nand_int_disable(&nand->nand_obj, nand->irq_status); + + dev_dbg(&nand->pdev->dev, "%s: irq_status 0x%x\n", + __func__, nand->irq_status); + + complete(&nand->complete); + + return IRQ_HANDLED; +} + +/******************************************************************************/ +/**************************** configuration ***********************************/ +/******************************************************************************/ +static void nand_set_timing_mode(struct nand_data *nand, + enum al_nand_device_timing_mode timing) +{ + uint32_t cmds[] = { + AL_NAND_CMD_SEQ_ENTRY( + AL_NAND_COMMAND_TYPE_CMD, NAND_CMD_SET_FEATURES), + AL_NAND_CMD_SEQ_ENTRY( + AL_NAND_COMMAND_TYPE_ADDRESS, NAND_SET_FEATURES_ADDR), + AL_NAND_CMD_SEQ_ENTRY( + AL_NAND_COMMAND_TYPE_STATUS_WRITE, timing), + AL_NAND_CMD_SEQ_ENTRY( + AL_NAND_COMMAND_TYPE_STATUS_WRITE, 0x00), + AL_NAND_CMD_SEQ_ENTRY( + AL_NAND_COMMAND_TYPE_STATUS_WRITE, 0x00), + AL_NAND_CMD_SEQ_ENTRY( + AL_NAND_COMMAND_TYPE_STATUS_WRITE, 0x00)}; + + al_nand_cmd_seq_execute(&nand->nand_obj, cmds, ARRAY_SIZE(cmds)); + + nand_wait_cmd_fifo_empty(nand); +} + +static int nand_resources_get_and_map(struct platform_device *pdev, + void __iomem **nand_base, + void __iomem **pbs_base) +{ + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, "annapurna-labs,al-nand"); + + *nand_base = of_iomap(np, 0); + if (!(*nand_base)) { + pr_err("%s: failed to map nand memory\n", __func__); + return -ENOMEM; + } + + np = of_find_compatible_node(NULL, NULL, "annapurna-labs,al-pbs"); + + *pbs_base = of_iomap(np, 0); + if (!(*pbs_base)) { + pr_err("%s: pbs_base map failed\n", __func__); + return -ENOMEM; + } + + return 0; +} + +static void nand_onfi_config_set(struct nand_chip *nand, + struct al_nand_dev_properties *device_properties, + struct al_nand_ecc_config *ecc_config) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + struct nand_data *nand_dat = nand_data_get(mtd); + int i; + + dev_info(&nand_dat->pdev->dev, "%s: ONFI sdr_timing_modes 0x%x\n", + __func__, le16_to_cpu(nand->parameters.onfi->sdr_timing_modes)); + + /* find the max timing mode supported by the device and below + * AL_NAND_MAX_ONFI_TIMING_MODE */ + for (i = AL_NAND_MAX_ONFI_TIMING_MODE ; i >= 0 ; i--) { + if (!(BIT(i) & le16_to_cpu(nand->parameters.onfi->sdr_timing_modes))) + continue; + + dev_info(&nand_dat->pdev->dev, "%s: chosen manual timing mode index %d\n", + __func__, i); + /* + * Addressing RMN: 2903 + */ + device_properties->timingMode = AL_NAND_DEVICE_TIMING_MODE_MANUAL; + + memcpy(&device_properties->timing, + &al_nand_manual_timing[i], + sizeof(struct al_nand_device_timing)); + + break; + } + + BUG_ON(i < 0); + + dev_info(&nand_dat->pdev->dev, "%s: timing mode %d\n", + __func__, device_properties->timingMode); + + nand_set_timing_mode(nand_get_controller_data(nand), device_properties->timingMode); +} + +static void nand_ecc_config(struct nand_chip *nand, + uint32_t oob_size, + uint32_t hw_ecc_enabled, + uint32_t ecc_loc) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + struct nand_data *nand_dat = nand_data_get(mtd); + struct nand_ecc_ctrl *ecc = &nand->ecc; + int ecc_steps; + + dev_info(&nand_dat->pdev->dev, "%s: hw_ecc_enabled %d oob_size %d ecc_loc %d\n", + __func__, hw_ecc_enabled, oob_size, ecc_loc); + +#ifdef AL_NAND_ECC_SUPPORT + if (hw_ecc_enabled != 0) { + nand_dat->s_ecc_loc = ecc_loc; + 
	nand_dat->s_oob_size = oob_size;
+		nand_dat->s_ecc_oob_bytes = oob_size - ecc_loc;
+
+		ecc_steps = nand_dat->dev_ext_props.pageSize / nand_dat->cw_size;
+
+		ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+		ecc->size = nand_dat->cw_size; /* ECC message (codeword) size */
+		ecc->bytes = nand_dat->s_ecc_oob_bytes / ecc_steps; /* OOB bytes available for ECC per step */
+		ecc->strength = 4 * (1 + nand_dat->ecc_config.num_corr_bits); /* correctable bits per step */
+
+		ecc->read_page = ecc_read_page;
+		ecc->read_subpage = ecc_read_subpage;
+		ecc->write_page = ecc_write_page;
+
+		mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+	} else {
+		ecc->engine_type = NAND_ECC_ENGINE_TYPE_NONE;
+	}
+#else
+	/* no HW ECC support compiled in - leave the pages unprotected */
+	ecc->engine_type = NAND_ECC_ENGINE_TYPE_NONE;
+#endif
+}
+
+static int al_nand_attach_chip(struct nand_chip *nand)
+{
+	struct mtd_info *mtd = nand_to_mtd(nand);
+	struct nand_data *nand_dat = nand_data_get(mtd);
+	uint32_t ecc_loc;
+	int ret;
+
+	BUG_ON(mtd->oobsize > AL_NAND_MAX_OOB_SIZE);
+
+	nand_onfi_config_set(nand, &nand_dat->device_properties, &nand_dat->ecc_config);
+
+	/* Offset in OOB where ECC starts */
+	ecc_loc = nand_dat->ecc_config.spareAreaOffset - nand_dat->dev_ext_props.pageSize;
+	nand_ecc_config(nand, mtd->oobsize, nand_dat->dev_ext_props.eccIsEnabled, ecc_loc);
+
+	ret = al_nand_dev_config(&nand_dat->nand_obj,
+				 &nand_dat->device_properties,
+				 &nand_dat->ecc_config);
+	if (ret) {
+		dev_err(&nand_dat->pdev->dev, "al_nand_dev_config failed with %d\n", ret);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static const struct nand_controller_ops al_nand_controller_ops = {
+	.attach_chip = al_nand_attach_chip,
+};
+
+static int al_nand_probe(struct platform_device *pdev)
+{
+	struct mtd_info *mtd;
+	struct nand_chip *nand;
+	struct nand_data *nand_dat;
+	int ret = 0;
+	void __iomem *nand_base;
+	void __iomem *pbs_base;
+
+	nand_dat = kzalloc(sizeof(struct nand_data), GFP_KERNEL);
+	if (nand_dat == NULL) {
+		pr_err("Failed to allocate nand_data!\n");
+		return -ENOMEM;
+	}
+
+	pr_info("%s: AnnapurnaLabs nand driver\n", __func__);
+
+	nand = &nand_dat->chip;
+	mtd = nand_to_mtd(nand);
+
+	nand_dat->cache_pos = -1;
+	nand_controller_init(&nand_dat->controller);
+	nand->controller = &nand_dat->controller;
+	nand->controller->ops = &al_nand_controller_ops;
+	nand_set_controller_data(nand, nand_dat);
+	nand_dat->pdev = pdev;
+	mtd->dev.parent = &pdev->dev;
+	nand_set_flash_node(nand, pdev->dev.of_node);
+
+	dev_set_drvdata(&pdev->dev, nand_dat);
+
+	mtd->name = kasprintf(GFP_KERNEL, AL_NAND_NAME);
+	if (!mtd->name) {
+		pr_err("%s: error allocating name\n", __func__);
+		kfree(nand_dat);
+		return -ENOMEM;
+	}
+
+	ret = nand_resources_get_and_map(pdev, &nand_base, &pbs_base);
+	if (ret != 0) {
+		pr_err("%s: nand_resources_get_and_map failed\n", __func__);
+		goto err;
+	}
+
+	ret = al_nand_init(&nand_dat->nand_obj, nand_base, NULL, 0);
+	if (ret != 0) {
+		pr_err("nand init failed\n");
+		goto err;
+	}
+
+	if (0 != al_nand_dev_config_basic(&nand_dat->nand_obj)) {
+		pr_err("dev_config_basic failed\n");
+		ret = -EIO;
+		goto err;
+	}
+
+	nand_dat->irq = platform_get_irq(pdev, 0);
+	if (nand_dat->irq < 0) {
+		pr_err("%s: no irq defined\n", __func__);
+		ret = -ENXIO;
+		goto err;
+	}
+	nand_interrupt_init(nand_dat);
+
+	nand->options = NAND_NO_SUBPAGE_WRITE;
+
+	nand->legacy.cmd_ctrl = nand_cmd_ctrl;
+	nand->legacy.read_byte = nand_read_byte_from_fifo;
+
+static int al_nand_probe(struct platform_device *pdev)
+{
+	struct mtd_info *mtd;
+	struct nand_chip *nand;
+	struct nand_data *nand_dat;
+	int ret = 0;
+	void __iomem *nand_base;
+	void __iomem *pbs_base;
+
+	nand_dat = kzalloc(sizeof(struct nand_data), GFP_KERNEL);
+	if (nand_dat == NULL) {
+		pr_err("Failed to allocate nand_data!\n");
+		return -ENOMEM;
+	}
+
+	pr_info("%s: AnnapurnaLabs nand driver\n", __func__);
+
+	nand = &nand_dat->chip;
+	mtd = nand_to_mtd(nand);
+
+	nand_dat->cache_pos = -1;
+	nand_controller_init(&nand_dat->controller);
+	nand->controller = &nand_dat->controller;
+	nand->controller->ops = &al_nand_controller_ops;
+	nand_set_controller_data(nand, nand_dat);
+	nand_dat->pdev = pdev;
+	mtd->dev.parent = &pdev->dev;
+	nand_set_flash_node(nand, pdev->dev.of_node);
+
+	dev_set_drvdata(&pdev->dev, nand_dat);
+
+	mtd->name = kasprintf(GFP_KERNEL, AL_NAND_NAME);
+	if (!mtd->name) {
+		pr_err("%s: error allocating name\n", __func__);
+		kfree(nand_dat);
+		return -ENOMEM;
+	}
+
+	ret = nand_resources_get_and_map(pdev, &nand_base, &pbs_base);
+	if (ret != 0) {
+		pr_err("%s: nand_resources_get_and_map failed\n", __func__);
+		goto err;
+	}
+
+	ret = al_nand_init(&nand_dat->nand_obj, nand_base, NULL, 0);
+	if (ret != 0) {
+		pr_err("nand init failed\n");
+		goto err;
+	}
+
+	if (0 != al_nand_dev_config_basic(&nand_dat->nand_obj)) {
+		pr_err("dev_config_basic failed\n");
+		ret = -EIO;
+		goto err;
+	}
+
+	nand_dat->irq = platform_get_irq(pdev, 0);
+	if (nand_dat->irq < 0) {
+		pr_err("%s: no irq defined\n", __func__);
+		ret = -ENXIO;
+		goto err;
+	}
+	nand_interrupt_init(nand_dat);
+
+	nand->options = NAND_NO_SUBPAGE_WRITE;
+
+	nand->legacy.cmd_ctrl = nand_cmd_ctrl;
+	nand->legacy.read_byte = nand_read_byte_from_fifo;
+	nand->legacy.read_buf = nand_read_buff;
+	nand->legacy.dev_ready = nand_dev_ready;
+	nand->legacy.write_buf = nand_write_buff;
+	nand->legacy.select_chip = nand_dev_select;
+
+	ret = al_nand_properties_decode(pbs_base,
+					&nand_dat->device_properties,
+					&nand_dat->ecc_config,
+					&nand_dat->dev_ext_props);
+	if (ret) {
+		pr_err("%s: nand_properties_decode failed with %d\n", __func__, ret);
+		ret = -EIO;
+		goto err;
+	}
+
+	dev_info(&nand_dat->pdev->dev, "device_properties: num_col_cyc %d num_row_cyc %d pageSize %d\n",
+		 nand_dat->device_properties.num_col_cyc,
+		 nand_dat->device_properties.num_row_cyc,
+		 nand_dat->device_properties.pageSize);
+	dev_info(&nand_dat->pdev->dev, "dev_ext_props: pageSize %d blockSize %d wordSize %d eccIsEnabled %d\n",
+		 nand_dat->dev_ext_props.pageSize,
+		 nand_dat->dev_ext_props.blockSize,
+		 nand_dat->dev_ext_props.wordSize,
+		 nand_dat->dev_ext_props.eccIsEnabled);
+	dev_info(&nand_dat->pdev->dev, "ecc_config: algorithm %d num_corr_bits %d messageSize %d spareAreaOffset %d\n",
+		 nand_dat->ecc_config.algorithm,
+		 nand_dat->ecc_config.num_corr_bits,
+		 nand_dat->ecc_config.messageSize,
+		 nand_dat->ecc_config.spareAreaOffset);
+
+	/* must be set before scan_ident because it uses read_buff */
+	nand_dat->cw_size = 512 << nand_dat->ecc_config.messageSize;
+
+	ret = nand_scan(nand, AL_NAND_MAX_CHIPS);
+	if (ret) {
+		pr_err("%s: nand_scan failed\n", __func__);
+		goto err;
+	}
+
+	dev_info(&nand_dat->pdev->dev, "oobavail %d\n", mtd->oobavail);
+
+	mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
+
+	return 0;
+
+err:
+	kfree(nand_dat->mtd.name);
+	kfree(nand_dat);
+	return ret;
+}
+
+static int al_nand_remove(struct platform_device *pdev)
+{
+	struct nand_data *nand_dat = dev_get_drvdata(&pdev->dev);
+	int ret;
+
+	dev_dbg(&nand_dat->pdev->dev, "%s: nand driver removed\n", __func__);
+
+	ret = mtd_device_unregister(&nand_dat->mtd);
+	WARN_ON(ret);
+	nand_cleanup(&nand_dat->chip);
+
+	kfree(nand_dat->mtd.name);
+	kfree(nand_dat);
+
+	return 0;
+}
+
+static const struct of_device_id al_nand_match[] = {
+	{ .compatible = "annapurna-labs,al-nand", },
+	{}
+};
+
+static struct platform_driver al_nand_driver = {
+	.driver = {
+		.name = "annapurna-labs,al-nand",
+		.owner = THIS_MODULE,
+		.of_match_table = al_nand_match,
+	},
+	.probe = al_nand_probe,
+	.remove = al_nand_remove,
+};
+
+static int __init nand_init(void)
+{
+	return platform_driver_register(&al_nand_driver);
+}
+
+static void __exit nand_exit(void)
+{
+	platform_driver_unregister(&al_nand_driver);
+}
+
+module_init(nand_init);
+module_exit(nand_exit);
+
diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/Kconfig b/target/linux/alpine/files/drivers/net/ethernet/al/Kconfig
new file mode 100644
index 00000000000000..bb5e54aa9b5056
--- /dev/null
+++ b/target/linux/alpine/files/drivers/net/ethernet/al/Kconfig
@@ -0,0 +1,33 @@
+config NET_AL_ETH
+	tristate "Annapurna Labs unified 1G/10G Ethernet driver"
+	depends on PCI && INET && AL_DMA && ARCH_ALPINE
+	select PHYLIB
+	help
+	  This driver supports both the standard and the advanced Annapurna
+	  Labs 1G and 10G Ethernet controllers.
+
+choice
+	prompt "Receive buffer allocation mode"
+	default AL_ETH_ALLOC_FRAG
+	depends on NET_AL_ETH
+
+config AL_ETH_ALLOC_FRAG
+	bool "Frag allocation for rx buffers"
+	help
+	  Say Y here if you want to enable frag allocation for rx buffers
+	  in the Annapurna Labs eth driver.
+
+config AL_ETH_ALLOC_PAGE
+	bool "Page allocation for rx buffers"
+	help
+	  Say Y here if you want to enable page allocation for rx buffers
+	  in the Annapurna Labs eth driver.
+
+config AL_ETH_ALLOC_SKB
+	bool "skb allocation for rx buffers"
+	help
+	  Say Y here if you want to enable skb allocation for rx buffers
+	  in the Annapurna Labs eth driver. This allocation mode is preferred
+	  for data plane applications.
+
+endchoice
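The three choice symbols above are mutually exclusive compile-time paths; al_eth.c, further down in this patch, selects its rx-buffer allocator with exactly this kind of #ifdef ladder. A minimal sketch of the pattern, with placeholder bodies (the real allocators are al_eth_alloc_rx_page()/_frag()/_skb()):

	#ifdef CONFIG_AL_ETH_ALLOC_PAGE
	static int alloc_rx_buffer(void)
	{
		return 0;	/* alloc_page() based path */
	}
	#elif defined(CONFIG_AL_ETH_ALLOC_FRAG)
	static int alloc_rx_buffer(void)
	{
		return 0;	/* netdev_alloc_frag() based path (the default) */
	}
	#elif defined(CONFIG_AL_ETH_ALLOC_SKB)
	static int alloc_rx_buffer(void)
	{
		return 0;	/* pre-allocated skb path, preferred for data plane */
	}
	#endif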
diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/Makefile b/target/linux/alpine/files/drivers/net/ethernet/al/Makefile
new file mode 100644
index 00000000000000..ef4f891ed6af1a
--- /dev/null
+++ b/target/linux/alpine/files/drivers/net/ethernet/al/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -I$(srctree)/arch/arm/mach-alpine/include
+
+obj-$(CONFIG_NET_AL_ETH) += al_eth_drv.o
+
+al_eth_drv-objs := al_eth.o al_eth_sysfs.o al_hal_eth_main.o al_hal_eth_kr.o al_init_eth_kr.o al_init_eth_lm.o
diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/README b/target/linux/alpine/files/drivers/net/ethernet/al/README
new file mode 100644
index 00000000000000..3892cf2e539d27
--- /dev/null
+++ b/target/linux/alpine/files/drivers/net/ethernet/al/README
@@ -0,0 +1,354 @@
+Linux driver for the Annapurna Labs Ethernet unified adapter.
+This driver works for both Standard and Advanced Controller
+modes, and supports the various media types and speeds.
+
+Arch:
+====
+
+This driver implements a standard Linux Ethernet driver: the kernel
+communicates with the driver through net_device_ops (defined in
+include/linux/netdevice.h). All Ethernet adapters in Annapurna Labs Alpine are
+implemented as integrated PCIe endpoints, hence the driver uses the PCI
+interface for probing the adapter and for various other management functions.
+
+The driver communicates with the hardware using the Annapurna Labs Ethernet
+and UDMA HAL drivers.
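A hedged sketch of the net_device_ops wiring just described. The ops table and handler signature are standard kernel API; the stub body and its name are illustrative, while al_eth_start_xmit(), al_eth_change_mtu() and al_eth_ioctl() do exist later in this patch:

	#include <linux/netdevice.h>

	/* Illustrative stub; the real TX entry point is al_eth_start_xmit(). */
	static netdev_tx_t al_eth_xmit_sketch(struct sk_buff *skb, struct net_device *dev)
	{
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	static const struct net_device_ops al_eth_netdev_ops_sketch = {
		.ndo_start_xmit = al_eth_xmit_sketch,
		/* the real driver also wires .ndo_open, .ndo_stop,
		 * .ndo_change_mtu (al_eth_change_mtu), ioctl (al_eth_ioctl), ... */
	};
	/* probe-time: netdev->netdev_ops = &al_eth_netdev_ops_sketch; */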
+
+
+Internal Data Structures:
+=========================
+al_eth_adapter:
+---------------
+	This structure holds all the information needed to operate the adapter.
+	Fields:
+	- netdev: pointer to the Linux net device structure
+	- pdev: pointer to the Linux PCI device structure
+	- flags: various flags about the status of the driver (MSI-X mode, ...)
+	- hal_adapter: the HAL structure used by the HAL to manage the adapter
+	- num_tx_queues/num_rx_queues: number of TX/RX queues used by the driver
+	- tx_ring: per-queue al_eth_ring data structure, used to manage
+	  information needed for transmitting packets.
+	- rx_ring: per-queue al_eth_ring data structure, used to manage
+	  information needed for receiving packets.
+	- al_napi: array of al_eth_napi; each NAPI-based interrupt has an entry
+	  in this array. The driver uses two NAPI interrupts per queue, one for
+	  TX and one for RX.
+	- int_mode: interrupt mode (legacy, MSI-X)
+	- irq_tbl: array of al_eth_irq; each interrupt used by the driver has
+	  an entry in this array.
+	- msix_entries: pointer to the Linux data structure used to tell the
+	  kernel which entries to use for MSI-X, and to learn which IRQs the
+	  kernel assigned to those interrupts.
+	- msix_vecs: number of MSI-X interrupts used.
+	- tx_ring_count: the size of the tx_ring
+	- tx_descs_count: the size of the TX hardware queue.
+	- rx_ring_count: the size of the rx_ring
+	- rx_descs_count: the size of the RX hardware queue (same value as
+	  rx_ring_count)
+	- toeplitz_hash_key: array of the keys used by the HW for hash
+	  calculation.
+	- rss_ind_tbl: RSS (Receive Side Scaling) indirection table; holds the
+	  output queue index for each flow.
+	- msg_enable: variable used by the Linux network stack to control the
+	  log level of the driver.
+	- mac_stats: the HAL structure used to read statistics from the HAL.
+	- mac_mode: the HW MAC mode (RGMII, 10G serial, ...).
+	- phy_exist: boolean that defines whether an external Ethernet PHY is
+	  connected.
+	- mdio_bus: Linux structure for the MDIO controller implemented by the
+	  driver.
+	- phy_dev: Linux structure for the PHY device.
+	- phy_addr: the MDIO device address of the PHY.
+	- link_config: structure used to hold the link parameters provided by
+	  the Linux PHY driver; these parameters are passed to the HAL in order
+	  to configure the MAC whenever the link parameters are changed by the
+	  link partner or by user intervention (using mii-tool).
+	- flow_ctrl_params: used to configure the HW to use flow control.
+	- eth_hal_params: data structure used to pass various parameters to the
+	  Ethernet HAL driver.
+	- link_status_task: delayed task used for polling the link status.
+	- link_poll_interval: the interval between occurrences of the task
+	  above; configurable through sysfs.
+	- serdes_init: true if the serdes object is already initialized.
+	- serdes_obj: SerDes private structure
+	- serdes_grp: the SerDes group for this device.
+	- serdes_lane: the SerDes lane for this device.
+	- an_en: set to true when auto-negotiation is needed for this port.
+	- lt_en: set to true when link training is needed for this port.
+	- last_link_status: saves the link status from the last check; used to
+	  determine whether the link status has changed.
+	- lt_completed: true if link training was completed in this connection.
+
+al_eth_ring:
+-----------
+	This structure is used to manage outstanding TX packets (packets
+	transmitted but not yet acknowledged), and also RX buffers (allocated
+	RX buffers not yet filled by the hardware). Each TX and RX queue has a
+	single instance of this structure. The TX and RX queues are completely
+	independent, each with its own ring, and each can run on its own core.
+	This structure uses a circular buffer data structure.
+
+	Fields:
+	- dev: cache of the adapter's struct device, used for log messages
+	  and DMA mapping functions.
+	- hal_pkt: used for RX only; this is the structure filled by the HAL
+	  driver with the information needed about received packets.
+	- dma_q: the UDMA queue data structure; this structure is needed by
+	  the HAL API.
+	- next_to_use: index at which the producer inserts items into the
+	  buffer.
+	- next_to_clean: index at which the consumer finds the next item in
+	  the buffer.
+	- frag_size: receive fragment size; used in RX only.
+	- unmask_reg_offset: the offset of the mask-clear register in the
+	  interrupt controller; the driver bypasses the HAL API and accesses
+	  this register directly for performance's sake.
+	- unmask_val: the value written to the register above.
+	- tx_buffer_info: points to a buffer of TX items used to track
+	  outstanding TX packets.
+	- rx_buffer_info: points to a buffer of RX items used to track
+	  allocated buffers that were added to the DMA engine and are not yet
+	  filled by received frames.
+	- sw_count: number of tx/rx_buffer_info entries.
+	- hw_count: number of HW descriptors.
+	- size: size (in bytes) of the HW descriptors
+	- csize: size (in bytes) of the HW completion descriptors; used for RX.
+	- q_params: structure used to initialize the UDMA queue.
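A runnable toy model of the next_to_use/next_to_clean bookkeeping above. The real driver wraps indices with its AL_ETH_RX_RING_IDX_NEXT macro; the power-of-two modulo below is an assumed equivalent:

	#include <stdio.h>

	#define RING_SIZE 8	/* stand-in for sw_count; power of two assumed */

	int main(void)
	{
		unsigned int next_to_use = 0, next_to_clean = 0;
		int i;

		for (i = 0; i < 5; i++)		/* producer posts 5 buffers */
			next_to_use = (next_to_use + 1) % RING_SIZE;
		for (i = 0; i < 3; i++)		/* consumer completes 3 of them */
			next_to_clean = (next_to_clean + 1) % RING_SIZE;

		/* outstanding entries between the two indices: prints 2 */
		printf("in flight: %u\n", (next_to_use - next_to_clean) % RING_SIZE);
		return 0;
	}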
+
+al_eth_rx_buffer:
+-----------------
+	This structure is used to save the context of a single data buffer
+	allocated for frame reception.
+	Fields:
+	- skb: pointer to the Linux network stack data structure for packets.
+	- page: pointer to the page allocated as the data buffer. Used only in
+	  the PAGE allocation mode.
+	- page_offset: in PAGE allocation mode, this field holds the offset
+	  within the page where the data buffer starts. Currently this field
+	  is always set to 0.
+	- data: pointer to the CPU address of the data buffer. Used only in
+	  frag allocation mode.
+	- data_size: size in bytes allocated for data in this buffer.
+	- frag_size: total fragment size in bytes.
+	- dma: the physical address of the data buffer.
+	- al_buf: HAL data structure used to hold the information (physical
+	  address and length) of a data buffer.
+
+al_eth_tx_buffer:
+-----------------
+	This structure is used to save the context of a packet that was added
+	to the TX queue and is not yet acknowledged as completed.
+	Fields:
+	- skb: pointer to the Linux network stack data structure for packets.
+	- hal_pkt: HAL data structure used to pass packet information when
+	  requesting packet transmission from the HAL driver.
+	- tx_descs: number of descriptors used by the DMA engine to represent
+	  the packet. The value of this field is set by the HAL layer.
+
+al_eth_napi:
+------------
+	This is our private data structure used to retrieve all the needed
+	information when NAPI polling is scheduled. The driver uses the NAPI
+	interface for handling transmit completions and packet reception; each
+	TX and RX queue has an instance of this structure.
+
+al_eth_irq:
+-----------
+	This data structure is used to save all the needed information about
+	each interrupt registered by the driver.
+
+
+Interrupts mode:
+================
+The Ethernet adapter supports the TrueMultiCore(TM) technology and is based on
+the Annapurna Labs Unified DMA (aka UDMA), thus it has an interrupt controller
+that can generate a legacy level-sensitive interrupt, or alternatively, an
+MSI-X interrupt for each cause bit.
+
+The driver first tries to work in per-queue MSI-X mode for optimal
+performance, with an MSI-X interrupt for each TX and each RX queue, and a
+single one for management (bit 0 of group A). If it fails to enable the
+per-queue MSI-X mode, it tries to use a single MSI-X interrupt for all the
+events. If that fails as well, it falls back to a single legacy
+level-sensitive interrupt wire for all the events.
+
+Interrupt registration is done when the Linux interface of the adapter is
+opened, and de-registration is done when the interface is closed. The
+system's interrupt status can be viewed through the /proc/interrupts pseudo
+file.
+When legacy mode is used, the registered interrupt name will be:
+al-eth-intx-all@pci:
+When single MSI-X interrupt mode is used, the registered interrupt name will be:
+al-eth-msix-all@pci:
+And when per-queue MSI-X mode is used, the driver registers one management
+interrupt named:
+al-eth-msix-mgmt@pci:
+and, for each queue, an interrupt is registered with the following name:
+al-eth-<queue type>-comp-<queue index>, where
+queue type is tx for TX queues and rx for RX queues.
+
+The user can force the driver to use legacy interrupt mode by setting the
+module parameter disable_msi to a non-zero value.
+
+
+
+Media Type:
+=========
+1000Base-T: this type is supported when using the RGMII MAC/PHY interface.
+10GBase-SR: this type is supported when using the 10 Gbps serial mode, with a
+10G SR SFP+ module.
+
+MDIO interface:
+==============
+The driver implements the Linux kernel driver for the MDIO bus. This bus is
+used to communicate with the PHY (when one exists); the driver assumes the
+kernel has a PHY driver that supports the connected PHY.
+
+The MDIO driver is registered and used only when working in RGMII mode.
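The driver's MDIO callbacks (al_mdio_read()/al_mdio_write() in al_eth.c below) decode clause-45 accesses from a single packed register value. A runnable sketch using the same masks the driver defines (MII_ADDR_C45, AL_ETH_MDIO_C45_DEV_MASK/SHIFT, AL_ETH_MDIO_C45_REG_MASK); the example device/register values are arbitrary:

	#include <stdio.h>

	#define MII_ADDR_C45              (1 << 30)
	#define AL_ETH_MDIO_C45_DEV_MASK  0x1f0000
	#define AL_ETH_MDIO_C45_DEV_SHIFT 16
	#define AL_ETH_MDIO_C45_REG_MASK  0xffff

	int main(void)
	{
		unsigned int dev = 1, reg16 = 0x8000;	/* arbitrary MMD/register */
		unsigned int reg = MII_ADDR_C45 |
				   (dev << AL_ETH_MDIO_C45_DEV_SHIFT) | reg16;

		printf("c45=%d dev=%u reg=0x%x\n",
		       !!(reg & MII_ADDR_C45),
		       (reg & AL_ETH_MDIO_C45_DEV_MASK) >> AL_ETH_MDIO_C45_DEV_SHIFT,
		       reg & AL_ETH_MDIO_C45_REG_MASK);
		return 0;
	}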
+
+Memory allocations:
+==================
+Cache-coherent buffers are allocated for the following DMA rings:
+- TX submission ring
+- RX submission ring
+- RX completion ring
+These buffers are allocated from open() and freed from close().
+
+tx_buffer_info/rx_buffer_info are allocated using kzalloc from open() and
+freed from close().
+
+RX buffers:
+The driver supports two allocation modes:
+1. frag allocation (default): buffer allocated using netdev_alloc_frag()
+2. page allocation: buffer allocated using alloc_page()
+
+RX buffer allocation in both modes is done:
+1. when enabling the interface, from open().
+2. once per rx poll, for all the frames received and not copied into a newly
+   allocated skb (len < SMALL_PACKET_SIZE).
+
+These buffers are freed on close().
+
+SKB:
+The driver allocates an skb for received frames from the RX handler, in NAPI
+context. The allocation method depends on the allocation mode and the frame
+length: when working in frag allocation mode and the frame length is larger
+than SMALL_PACKET_SIZE, build_skb() is used; otherwise
+netdev_alloc_skb_ip_align() is used.
+
+MULTIQUEUE:
+===========
+As part of the TrueMultiCore(TM) technology, the driver supports multiqueue
+mode for both TX and RX. This mode has various benefits when queues are
+allocated to different CPU cores/threads:
+1. reduced CPU/thread/process contention on a given ethernet port when
+   transmitting a packet.
+2. reduced cache miss rate on transmit completion, in particular for data
+   cache lines that hold the sk_buff structures.
+3. increased process-level parallelism when handling received packets.
+4. increased data cache hit rate, by steering kernel processing of packets to
+   the CPU where the application thread consuming the packet is running.
+5. hardware interrupt re-direction.
+
+TX queue selection: the driver is optimized for the case where the number of
+CPUs equals the number of queues; in this case, each CPU is mapped to a single
+queue. This mapping is done by the API function al_eth_select_queue().
+
+RX queue selection: the driver supports RSS and Accelerated RFS.
+ - RSS: the driver configures the HW to select the queue using a Toeplitz
+   hash over the 4/2-tuple. The hash output is used as an index into the
+   hardware hash table that contains the output queue index.
+RSS configuration: by default, the driver spreads the output queue indices
+evenly over all available queues (the Linux kernel's recommended default
+setup). The user can change the distribution of the output queues using
+ethtool. For example, the following command will distribute 10% of the
+packets to queue 0, 20% to queue 1, etc.:
+#ethtool -X eth0 weight 10 20 30 40
+
+ - Accelerated RFS: when the kernel is built with this mode, it can direct
+   the hardware to steer a flow to a specific cpu (queue). To enable this
+   mode, the user needs to set the interrupt affinity of the queues to
+   different cores (per-queue MSI-X mode must be used); also, the RFS kernel
+   configuration must be set according to
+   Documentation/networking/scaling.txt.
+
+Interrupts affinity:
+-------------------
+In order to utilize the multiqueue benefits, the per-queue MSI-X mode should
+be used; moreover, the user must set the interrupt affinity of each of the TX
+and RX queues. The guidance is to have the interrupts of TX and RX queue N
+routed to CPU N.
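A sketch of the even default spread described under RSS configuration, using the kernel's ethtool_rxfh_indir_default() helper (which implements the recommended index-modulo-queues spread); the table size here is an assumption, not the HAL's real value:

	#include <linux/ethtool.h>

	#define RSS_TBL_SIZE 256	/* assumed indirection table size */

	/* Fill an RSS indirection table with an even spread over the RX queues. */
	static void rss_ind_tbl_default_fill(u8 *rss_ind_tbl, u32 num_rx_queues)
	{
		u32 i;

		for (i = 0; i < RSS_TBL_SIZE; i++)
			rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, num_rx_queues);
	}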
+
+
+DATA PATH:
+==========
+TX:
+---
+al_eth_start_xmit() is called by the stack; this function does the following:
+- maps the data buffers (skb->data and frags)
+- populates the HAL structure used to transmit the packet (hal_pkt)
+- adds the packet to the tx ring
+- calls the al_eth_pkt_tx() HAL function, which adds the packet to the TX
+  UDMA hardware
+- when the UDMA hardware finishes sending the packet, a TX completion
+  interrupt is raised; this interrupt may be delayed when coalescing mode is
+  used.
+- the TX interrupt handler schedules NAPI
+- the tx_poll function is called; this function gets the number of completed
+  descriptors (total_done), then walks over the tx ring starting from the
+  next_to_clean pointer, and stops when the accumulated descriptors reach
+  total_done.
+
+RX:
+---
+- when a packet is received by the MAC layer, it is passed to the UDMA, which
+  in turn writes it to memory at an rx buffer that was previously allocated.
+  The driver makes sure to set the INTERRUPT bit in each of the rx
+  descriptors, so an interrupt is triggered when a new packet is written to
+  that descriptor.
+- the RX interrupt handler schedules NAPI.
+- the rx_poll function is called; this function calls the al_eth_pkt_rx() HAL
+  function, which returns the number of descriptors used by a new unhandled
+  packet, or zero if no new packet is found. It then calls the
+  al_eth_rx_skb() function.
+- al_eth_rx_skb() checks the packet length:
+  if the packet is too small (len less than AL_ETH_SMALL_PACKET_SIZE), the
+  driver allocates an skb structure for the new packet and copies the packet
+  payload into the skb data buffer. This way the original data buffer is not
+  passed to the stack and is reused for subsequent rx packets.
+  Otherwise, the function unmaps the RX buffer, allocates a new skb structure
+  and hooks the rx buffer to the skb frags.
+  (*) copying the packet payload into the skb data buffer for short packets
+  is a common optimization in Linux network drivers; this method saves
+  allocating and mapping large buffers.
+  (**) allocating the skb on packet reception is also a common method, with
+  the following benefits:
+  (i) the skb is used and accessed immediately after allocation, which
+  reduces possible cache misses.
+  (ii) the number of 'in use' sk_buffs can be kept to a bare minimum,
+  especially when packets are dropped by the stack.
+
+- the new skb is updated with the needed information (protocol, checksum HW
+  calculation result, etc.) and is then passed to the network stack using the
+  NAPI interface function napi_gro_receive().
+
+10G KR auto-negotiation and link training:
+==========================================
+The driver implements an auto-negotiation and link training algorithm as
+follows: a delayed work is scheduled periodically for each port to check the
+link status. If auto-negotiation and link training are required and a signal
+is detected, the algorithm starts.
+Auto-negotiation:
+-----------------
+ - Advertise the local capabilities to the remote side.
+ - Wait to receive a page from the remote side.
+ - Get the remote capabilities.
+ - If the remote capabilities support a 10GBASE-KR link, link training is
+   initiated.
+
+Link training:
+--------------
+ - The link training algorithm is divided into a receiver task and a
+   transmitter task; both tasks continue to execute until both sides are done
+   with their measurements.
+ - The receiver task gets a message from the remote side, configures the
+   serdes accordingly, and prepares the response message.
+ - The transmitter task follows a state machine to find the best Rx eye
+   measurement. The state machine sends configuration-change messages to the
+   remote side, waits for each change to complete, measures the eye, and
+   keeps the best score found.
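A runnable sketch of the copy-break decision described for al_eth_rx_skb(); the threshold value is an assumption chosen only to make the example concrete, not the driver's actual AL_ETH_SMALL_PACKET_SIZE:

	#include <stdbool.h>
	#include <stdio.h>

	#define SMALL_PACKET_SIZE 256	/* assumed threshold */

	/* true  -> copy payload into a fresh skb, recycle the rx buffer
	 * false -> unmap the buffer and attach it to the skb frags */
	static bool rx_copy_break(unsigned int len)
	{
		return len < SMALL_PACKET_SIZE;
	}

	int main(void)
	{
		printf("64B: copy=%d, 1500B: copy=%d\n",
		       rx_copy_break(64), rx_copy_break(1500));
		return 0;
	}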
+
+TODO:
+=====
+ - use cached meta descriptors
+ - set interrupt affinity_hint
+ - cache-alignment struct reordering optimizations.
+ - add prefetch optimizations in critical functions.
+ - cache dev in struct adapter
+ - cache the netdev features.
+ - check the case where the kernel fails to allocate an skb for rx.
+ - add support for csum offloading for ipv6 with options
+ - interrupt handler for group D
+ - ethtool for rxnfc (RFS)
+ - TSO for packets larger than 64KB
+ - accelerated LRO
+ - MACSEC
+ - Flexible parser
+ - fix auto-negotiation support for pause link on 1G
+ - enable sending pause frames based on descriptor count.
+ - handle multicast addresses with an mhash table instead of the mac table.
diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_eth.c b/target/linux/alpine/files/drivers/net/ethernet/al/al_eth.c
new file mode 100644
index 00000000000000..7091582ba45261
--- /dev/null
+++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_eth.c
@@ -0,0 +1,4970 @@
+/*
+ * al_eth.c: AnnapurnaLabs Unified 1GbE and 10GbE ethernet driver.
+ *
+ * Copyright (C) 2014 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include "al_hal_eth.h"
+#include "al_init_eth_lm.h"
+#include "al_eth.h"
+#include "al_eth_sysfs.h"
+
+#define DRV_MODULE_NAME "al_eth"
+#define DRV_MODULE_VERSION "0.2"
+#define DRV_MODULE_RELDATE "Feb 18, 2013"
+
+static char version[] =
+	"AnnapurnaLabs unified 1GbE and 10GbE Ethernet Driver " DRV_MODULE_NAME
+	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Saeed Bishara ");
+MODULE_DESCRIPTION("AnnapurnaLabs unified 1GbE and 10GbE Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (5 * HZ)
+
+/* Time in msec to keep trying to read / write from MDIO in case of error */
+#define MDIO_TIMEOUT_MSEC 100
+
+static int disable_msi;
+
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+/* indexed by board_t */
+static struct {
+	char *name;
+	unsigned int bar; /* needed for FPGA/NIC modes */
+} board_info[] = {
+	{ .name = "AnnapurnaLabs unified 1Gbe/10Gbe" },
+	{
+		.name = "AnnapurnaLabs unified 1Gbe/10Gbe pcie NIC",
+		.bar = 5,
+	},
+	{
+		.name = "AnnapurnaLabs unified 1Gbe/10Gbe pcie FPGA NIC",
+		.bar = 0,
+	},
+};
+
+static const struct pci_device_id al_eth_pci_tbl[] = {
+	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS,
+	  PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_ETH, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  ALPINE_INTEGRATED },
+	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS,
+	  PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_ETH_ADVANCED, PCI_ANY_ID,
+	  PCI_ANY_ID, 0, 0, ALPINE_INTEGRATED },
+	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS,
+	  PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_ETH_NIC, PCI_ANY_ID, PCI_ANY_ID,
+	  0, 0, ALPINE_NIC },
+	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS,
+	  PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_ETH_FPGA_NIC, PCI_ANY_ID,
+	  PCI_ANY_ID, 0, 0, ALPINE_FPGA_NIC },
+	{
+		0,
+	}
+};
+
+MODULE_DEVICE_TABLE(pci, al_eth_pci_tbl);
+
+#ifdef CONFIG_AL_ETH_ALLOC_SKB
+static DEFINE_PER_CPU(struct sk_buff_head, rx_recycle_cache);
+#endif
+
+struct al_udma *al_eth_udma_get(struct al_eth_adapter *adapter, int tx)
+{
+	if (tx)
+		return &adapter->hal_adapter.tx_udma;
+	return &adapter->hal_adapter.rx_udma;
+}
+
+#define MII_ADDR_C45 (1 << 30)
+
+/* MDIO */
+#define AL_ETH_MDIO_DEV_ID 1
+#define AL_ETH_MDIO_C45_DEV_MASK 0x1f0000
+#define AL_ETH_MDIO_C45_DEV_SHIFT 16
+#define AL_ETH_MDIO_C45_REG_MASK 0xffff
+
+static int al_mdio_read(struct mii_bus *bp, int mii_id, int reg)
+{
+	struct al_eth_adapter *adapter = bp->priv;
+	u16 value = 0;
+	int rc;
+	int timeout = MDIO_TIMEOUT_MSEC;
+
+	adapter->phy_addr = mii_id;
+
+	while (timeout > 0) {
+		if (reg & MII_ADDR_C45) {
+			al_dbg("%s [c45]: dev %x reg %x val %x\n", __func__,
+			       ((reg & AL_ETH_MDIO_C45_DEV_MASK) >>
+				AL_ETH_MDIO_C45_DEV_SHIFT),
+			       (reg & AL_ETH_MDIO_C45_REG_MASK), value);
+			rc = al_eth_mdio_read(
+				&adapter->hal_adapter, adapter->phy_addr,
+				((reg & AL_ETH_MDIO_C45_DEV_MASK) >>
+				 AL_ETH_MDIO_C45_DEV_SHIFT),
+				(reg & AL_ETH_MDIO_C45_REG_MASK), &value);
+		} else if ((adapter->phy_if == AL_ETH_BOARD_PHY_IF_XMDIO) &&
+			   (mii_id & 0x8000)) {
+			/* User space can only pass a 16-bit PHY ID, so bit 15
+			   of the PHY ID is used to mark a C45 access */
+			al_dbg("%s [c45]: dev %x reg %x val %x\n", __func__,
+			       (mii_id & 0x1f), reg, value);
+			rc = al_eth_mdio_read(&adapter->hal_adapter,
+					      adapter->phy_addr,
+					      (mii_id & 0x1f), reg, &value);
+		} else {
+			rc = al_eth_mdio_read(&adapter->hal_adapter,
+					      adapter->phy_addr,
+					      MDIO_DEVAD_NONE, reg, &value);
+		}
+
+		if (rc == 0)
+			return value;
+
+		netdev_dbg(adapter->netdev,
+			   "mdio read failed. try again in 10 msec\n");
+
+		timeout -= 10;
+		msleep(10);
+	}
+
+	if (rc)
+		netdev_err(adapter->netdev, "MDIO read failed on timeout\n");
+
+	return value;
+}
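The bounded-retry pattern used by al_mdio_read() above (and al_mdio_write() below), in isolation as a runnable sketch; flaky_op() is a stand-in for the HAL call:

	#include <stdio.h>

	#define MDIO_TIMEOUT_MSEC 100

	/* Stand-in for al_eth_mdio_read()/write(): fails twice, then succeeds. */
	static int flaky_op(int *attempts)
	{
		return (++(*attempts) < 3) ? -1 : 0;
	}

	int main(void)
	{
		int attempts = 0, timeout = MDIO_TIMEOUT_MSEC, rc = -1;

		while (timeout > 0) {
			rc = flaky_op(&attempts);
			if (rc == 0)
				break;
			timeout -= 10;	/* the driver also msleep(10)s here */
		}
		printf("rc=%d after %d attempts\n", rc, attempts);
		return 0;
	}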
+
+static int al_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
+{
+	struct al_eth_adapter *adapter = bp->priv;
+	int rc;
+	int timeout = MDIO_TIMEOUT_MSEC;
+
+	adapter->phy_addr = mii_id;
+
+	while (timeout > 0) {
+		if (reg & MII_ADDR_C45) {
+			al_dbg("%s [c45]: device %x reg %x val %x\n", __func__,
+			       ((reg & AL_ETH_MDIO_C45_DEV_MASK) >>
+				AL_ETH_MDIO_C45_DEV_SHIFT),
+			       (reg & AL_ETH_MDIO_C45_REG_MASK), val);
+			rc = al_eth_mdio_write(
+				&adapter->hal_adapter, adapter->phy_addr,
+				((reg & AL_ETH_MDIO_C45_DEV_MASK) >>
+				 AL_ETH_MDIO_C45_DEV_SHIFT),
+				(reg & AL_ETH_MDIO_C45_REG_MASK), val);
+		} else if ((adapter->phy_if == AL_ETH_BOARD_PHY_IF_XMDIO) &&
+			   (mii_id & 0x8000)) {
+			/* User space can only pass a 16-bit PHY ID, so bit 15
+			   of the PHY ID is used to mark a C45 access */
+			al_dbg("%s [c45]: dev %x reg %x val %x\n", __func__,
+			       (mii_id & 0x1f), reg, val);
+			rc = al_eth_mdio_write(&adapter->hal_adapter,
+					       adapter->phy_addr,
+					       (mii_id & 0x1f), reg, val);
+		} else {
+			rc = al_eth_mdio_write(&adapter->hal_adapter,
+					       adapter->phy_addr,
+					       MDIO_DEVAD_NONE, reg, val);
+		}
+
+		if (rc == 0)
+			return 0;
+
+		netdev_err(adapter->netdev,
+			   "mdio write failed. try again in 10 msec\n");
+
+		timeout -= 10;
+		msleep(10);
+	}
+
+	if (rc)
+		netdev_err(adapter->netdev, "MDIO write failed on timeout\n");
+
+	return rc;
+}
+
+static int al_eth_sw_mdio_probe(struct al_eth_adapter *adapter)
+{
+	struct device *dev = &adapter->pdev->dev;
+	struct device_node *np;
+	struct mii_bus *bus;
+	int ret;
+
+	np = of_get_child_by_name(dev->of_node, "mdio");
+	if (!np) {
+		dev_info(dev, "MDIO child node missing\n");
+		return 0;
+	}
+
+	bus = mdiobus_alloc();
+	if (bus == NULL) {
+		ret = -ENOMEM;
+		goto fail_free_node;
+	}
+
+	bus->name = "mdio-al-eth";
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-0", bus->name);
+	bus->priv = adapter;
+	bus->parent = dev;
+	bus->read = &al_mdio_read;
+	bus->write = &al_mdio_write;
+
+	ret = of_mdiobus_register(bus, np);
+	if (ret) {
+		dev_err(dev, "cannot register MDIO bus\n");
+		goto fail_free_bus;
+	}
+
+	of_node_put(np);
+
+	adapter->sw_mdio_bus = bus;
+
+	return 0;
+
+fail_free_bus:
+	mdiobus_free(bus);
+
+fail_free_node:
+	of_node_put(np);
+
+	return ret;
+}
+
+static void al_eth_sw_mdio_remove(struct al_eth_adapter *adapter)
+{
+	if (adapter->sw_mdio_bus == NULL)
+		return;
+
+	mdiobus_unregister(adapter->sw_mdio_bus);
+	mdiobus_free(adapter->sw_mdio_bus);
+	adapter->sw_mdio_bus = NULL;
+}
+
+static int al_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+	struct mii_ioctl_data *mdio = if_mii(ifr);
+	struct phy_device *phydev;
+
+	al_dbg("ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", mdio->phy_id,
+	       mdio->reg_num, mdio->val_in);
+
+	if (adapter->mdio_bus) {
+		phydev = mdiobus_get_phy(adapter->mdio_bus, adapter->phy_addr);
+		if (phydev)
+			return phy_mii_ioctl(phydev, ifr, cmd);
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static int al_eth_flow_ctrl_config(struct al_eth_adapter *adapter);
+static uint8_t al_eth_flow_ctrl_mutual_cap_get(struct al_eth_adapter *adapter);
+static void al_eth_down(struct al_eth_adapter *adapter);
+static int al_eth_up(struct al_eth_adapter *adapter);
+static void al_eth_serdes_mode_set(struct al_eth_adapter *adapter);
+
+static void al_eth_adjust_link(struct net_device *dev)
+{
+	struct al_eth_adapter *adapter = netdev_priv(dev);
+	struct al_eth_link_config *link_config = &adapter->link_config;
+	struct phy_device *phydev = adapter->phydev;
+	enum al_eth_mac_mode mac_mode_needed = AL_ETH_MAC_MODE_RGMII;
+	int new_state = 0;
+	int force_1000_base_x = false;
+
+	if (phydev->link) {
+		if (phydev->duplex != link_config->active_duplex) {
+			new_state = 1;
+			link_config->active_duplex = phydev->duplex;
+		}
+
+		if (phydev->speed != link_config->active_speed) {
+			new_state = 1;
+			switch (phydev->speed) {
+			case SPEED_1000:
+			case SPEED_100:
+			case SPEED_10:
+				mac_mode_needed =
+					(adapter->mac_mode ==
+					 AL_ETH_MAC_MODE_RGMII) ?
+						AL_ETH_MAC_MODE_RGMII :
+						AL_ETH_MAC_MODE_10GbE_Serial; /* or AL_ETH_MAC_MODE_SGMII */
+				break;
+			case SPEED_10000:
+			case SPEED_5000:
+			case SPEED_2500:
+				mac_mode_needed = AL_ETH_MAC_MODE_10GbE_Serial;
+				break;
+			default:
+				if (netif_msg_link(adapter))
+					netdev_warn(
+						adapter->netdev,
+						"Ack! Speed (%d) is not 10/100/1000!",
+						phydev->speed);
+				break;
+			}
+			link_config->active_speed = phydev->speed;
+		}
+
+		if (!link_config->old_link) {
+			new_state = 1;
+			link_config->old_link = 1;
+		}
+
+		if (new_state) {
+			int rc;
+
+			if (adapter->mac_mode != mac_mode_needed) {
+				al_eth_down(adapter);
+				adapter->mac_mode = mac_mode_needed;
+				al_eth_serdes_mode_set(adapter);
+				if (link_config->active_speed <= 1000)
+					force_1000_base_x = true;
+				al_eth_up(adapter);
+			}
+
+			if (adapter->mac_mode != AL_ETH_MAC_MODE_10GbE_Serial) {
+				/* change the MAC link configuration */
+				rc = al_eth_mac_link_config(
+					&adapter->hal_adapter,
+					force_1000_base_x, link_config->autoneg,
+					link_config->active_speed,
+					link_config->active_duplex ? AL_TRUE :
+								     AL_FALSE);
+				if (rc) {
+					netdev_warn(
+						adapter->netdev,
+						"Failed to config the mac with the new link settings!");
+				}
+			}
+		}
+
+		if (link_config->flow_ctrl_supported &
+		    AL_ETH_FLOW_CTRL_AUTONEG) {
+			uint8_t new_flow_ctrl =
+				al_eth_flow_ctrl_mutual_cap_get(adapter);
+
+			if (new_flow_ctrl != link_config->flow_ctrl_active) {
+				link_config->flow_ctrl_active = new_flow_ctrl;
+				al_eth_flow_ctrl_config(adapter);
+			}
+		}
+	} else if (adapter->link_config.old_link) {
+		new_state = 1;
+		link_config->old_link = 0;
+		link_config->active_duplex = DUPLEX_UNKNOWN;
+		link_config->active_speed = SPEED_UNKNOWN;
+	}
+
+	if (new_state && netif_msg_link(adapter))
+		phy_print_status(phydev);
+}
+
+static int al_eth_phy_init(struct al_eth_adapter *adapter)
+{
+	struct phy_device *phydev =
+		mdiobus_get_phy(adapter->mdio_bus, adapter->phy_addr);
+	u32 features, supported, advertising;
+
+	adapter->link_config.old_link = 0;
+	adapter->link_config.active_duplex = DUPLEX_UNKNOWN;
+	adapter->link_config.active_speed = SPEED_UNKNOWN;
+
+	/* Attach the MAC to the PHY. */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 9, 0)
+	phydev = phy_connect(adapter->netdev, phydev_name(phydev),
+			     al_eth_adjust_link, PHY_INTERFACE_MODE_RGMII);
+#else
+	phydev = phy_connect(adapter->netdev, dev_name(&phydev->dev),
+			     al_eth_adjust_link, 0, PHY_INTERFACE_MODE_RGMII);
+#endif
+	if (IS_ERR(phydev)) {
+		netdev_err(adapter->netdev, "Could not attach to PHY\n");
+		return PTR_ERR(phydev);
+	}
+
+	phy_attached_info(phydev);
+
+	ethtool_convert_link_mode_to_legacy_u32(&features, PHY_GBIT_FEATURES);
+	ethtool_convert_link_mode_to_legacy_u32(&supported, phydev->supported);
+	ethtool_convert_link_mode_to_legacy_u32(&advertising,
+						phydev->advertising);
+
+	/* Mask with MAC supported features.
*/ + supported &= (features | SUPPORTED_Pause | SUPPORTED_Asym_Pause); + + advertising = supported; + + ethtool_convert_legacy_u32_to_link_mode(phydev->supported, supported); + ethtool_convert_legacy_u32_to_link_mode(phydev->advertising, + advertising); + + adapter->phydev = phydev; + /* Bring the PHY up */ + phy_start(adapter->phydev); + + return 0; +} + +/** + * al_eth_mdiobus_setup - initialize mdiobus and register to kernel + * + * + **/ +static int al_eth_mdiobus_setup(struct al_eth_adapter *adapter) +{ + struct phy_device *phydev; + int i; + int ret = 0; + + adapter->mdio_bus = mdiobus_alloc(); + if (adapter->mdio_bus == NULL) + return -ENOMEM; + + adapter->mdio_bus->name = "al mdio bus"; + snprintf(adapter->mdio_bus->id, MII_BUS_ID_SIZE, "%x", + (adapter->pdev->bus->number << 8) | adapter->pdev->devfn); + adapter->mdio_bus->priv = adapter; + adapter->mdio_bus->parent = &adapter->pdev->dev; + adapter->mdio_bus->read = &al_mdio_read; + adapter->mdio_bus->write = &al_mdio_write; + adapter->mdio_bus->phy_mask = ~(1 << adapter->phy_addr); + + for (i = 0; i < PHY_MAX_ADDR; i++) + adapter->mdio_bus->irq[i] = PHY_POLL; + + printk("phy_if=%d\n", adapter->phy_if); + + if (adapter->phy_if != AL_ETH_BOARD_PHY_IF_XMDIO) { + i = mdiobus_register(adapter->mdio_bus); + if (i) { + netdev_warn(adapter->netdev, + "mdiobus_reg failed (0x%x)\n", i); + mdiobus_free(adapter->mdio_bus); + return i; + } + phydev = mdiobus_get_phy(adapter->mdio_bus, adapter->phy_addr); + } else { + adapter->mdio_bus->phy_mask = 0xffffffff; + i = mdiobus_register(adapter->mdio_bus); + if (i) { + netdev_warn(adapter->netdev, + "mdiobus_reg failed (0x%x)\n", i); + mdiobus_free(adapter->mdio_bus); + return i; + } + + phydev = get_phy_device(adapter->mdio_bus, adapter->phy_addr, + true); + if (!phydev) { + pr_err("phy device get failed\n"); + goto error; + } + + ret = phy_device_register(phydev); + if (ret) { + pr_err("phy device register failed\n"); + goto error; + } + } + + if (!phydev || !phydev->drv) { + if (phydev) + return 0; + printk("phydev null!\n"); + goto error; + } + + return 0; + +error: + netdev_warn(adapter->netdev, "No PHY devices\n"); + mdiobus_unregister(adapter->mdio_bus); + mdiobus_free(adapter->mdio_bus); + return -ENODEV; +} + +/** + * al_eth_mdiobus_teardown - mdiobus unregister + * + * + **/ +static void al_eth_mdiobus_teardown(struct al_eth_adapter *adapter) +{ + if (!adapter->mdio_bus) + return; + + mdiobus_unregister(adapter->mdio_bus); + mdiobus_free(adapter->mdio_bus); + phy_device_free(adapter->phydev); +} + +static void al_eth_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct al_eth_adapter *adapter = netdev_priv(dev); + + if (netif_msg_tx_err(adapter)) + netdev_err(dev, "transmit timed out!!!!\n"); +} + +static int al_eth_change_mtu(struct net_device *dev, int new_mtu) +{ + struct al_eth_adapter *adapter = netdev_priv(dev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((new_mtu < AL_ETH_MIN_FRAME_LEN) || (new_mtu > AL_ETH_MAX_MTU) || + (max_frame > AL_ETH_MAX_FRAME_LEN)) { + netdev_err(dev, "Invalid MTU setting\n"); + return -EINVAL; + } + + netdev_dbg(adapter->netdev, "set MTU to %d\n", new_mtu); + al_eth_rx_pkt_limit_config(&adapter->hal_adapter, AL_ETH_MIN_FRAME_LEN, + max_frame); + + dev->mtu = new_mtu; + + al_eth_tso_mss_config(&adapter->hal_adapter, 0, dev->mtu - 100); + + return 0; +} + +static int al_init_rx_cpu_rmap(struct al_eth_adapter *adapter); + +int al_eth_fpga_read_pci_config(void *handle, int where, uint32_t *val) +{ + /* handle is the 
base address of the adapter */ + *val = readl(handle + where); + return 0; +} + +int al_eth_fpga_write_pci_config(void *handle, int where, uint32_t val) +{ + /* handle is the base address of the adapter */ + writel(val, handle + where); + return 0; +} + +int al_eth_read_pci_config(void *handle, int where, uint32_t *val) +{ + /* handle is a pointer to the pci_dev */ + pci_read_config_dword((struct pci_dev *)handle, where, val); + return 0; +} + +int al_eth_write_pci_config(void *handle, int where, uint32_t val) +{ + /* handle is a pointer to the pci_dev */ + pci_write_config_dword((struct pci_dev *)handle, where, val); + return 0; +} + +static int al_eth_function_reset(struct al_eth_adapter *adapter) +{ + struct al_eth_board_params params; + int rc; + + /* save board params so we restore it after reset */ + al_eth_board_params_get(adapter->mac_base, ¶ms); + al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr); + if (adapter->board_type == ALPINE_INTEGRATED) + rc = al_eth_flr_rmn(&al_eth_read_pci_config, + &al_eth_write_pci_config, adapter->pdev, + adapter->mac_base); + else + rc = al_eth_flr_rmn(&al_eth_fpga_read_pci_config, + &al_eth_fpga_write_pci_config, + adapter->internal_pcie_base, + adapter->mac_base); + + /* restore params */ + al_eth_board_params_set(adapter->mac_base, ¶ms); + al_eth_mac_addr_store(adapter->ec_base, 0, adapter->mac_addr); + return rc; +} + +static int al_eth_setup_int_mode(struct al_eth_adapter *adapter, int dis_msi); +static int al_eth_board_params_init(struct al_eth_adapter *adapter) +{ + if (adapter->board_type == ALPINE_NIC) { + adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial; + adapter->sfp_detection_needed = false; + adapter->phy_exist = false; + adapter->an_en = false; + adapter->lt_en = false; + adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ; + adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ; + } else if (adapter->board_type == ALPINE_FPGA_NIC) { + adapter->mac_mode = AL_ETH_MAC_MODE_SGMII; + adapter->sfp_detection_needed = false; + adapter->phy_exist = false; + adapter->an_en = false; + adapter->lt_en = false; + adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ; + adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ; + } else { + struct al_eth_board_params params; + int rc; + + adapter->auto_speed = false; + + rc = al_eth_board_params_get(adapter->mac_base, ¶ms); + if (rc) { + dev_err(&adapter->pdev->dev, + "board info not available\n"); + return -1; + } + + adapter->phy_exist = params.phy_exist == AL_TRUE; + adapter->phy_addr = params.phy_mdio_addr; + adapter->an_en = params.autoneg_enable; + adapter->lt_en = params.kr_lt_enable; + adapter->serdes_grp = params.serdes_grp; + adapter->serdes_lane = params.serdes_lane; + adapter->sfp_detection_needed = params.sfp_plus_module_exist; + adapter->i2c_adapter_id = params.i2c_adapter_id; + adapter->ref_clk_freq = params.ref_clk_freq; + adapter->dont_override_serdes = params.dont_override_serdes; + adapter->link_config.active_duplex = !params.half_duplex; + adapter->link_config.autoneg = + (adapter->phy_exist) ? 
(params.an_mode == + AL_ETH_BOARD_AUTONEG_IN_BAND) : + (!params.an_disable); + adapter->link_config.force_1000_base_x = + params.force_1000_base_x; + adapter->retimer.exist = params.retimer_exist; + adapter->retimer.bus_id = params.retimer_bus_id; + adapter->retimer.i2c_addr = params.retimer_i2c_addr; + adapter->retimer.channel = params.retimer_channel; + adapter->phy_if = params.phy_if; + + switch (params.speed) { + case AL_ETH_BOARD_1G_SPEED_1000M: + adapter->link_config.active_speed = 1000; + break; + case AL_ETH_BOARD_1G_SPEED_100M: + adapter->link_config.active_speed = 100; + break; + case AL_ETH_BOARD_1G_SPEED_10M: + adapter->link_config.active_speed = 10; + break; + default: + dev_warn(&adapter->pdev->dev, + "%s: invalid speed (%d)\n", __func__, + params.speed); + adapter->link_config.active_speed = 1000; + } + + switch (params.mdio_freq) { + case AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ: + adapter->mdio_freq = 2500; + break; + case AL_ETH_BOARD_MDIO_FREQ_1_MHZ: + adapter->mdio_freq = 1000; + break; + default: + dev_warn(&adapter->pdev->dev, + "%s: invalid mdio freq (%d)\n", __func__, + params.mdio_freq); + adapter->mdio_freq = 2500; + } + + switch (params.media_type) { + case AL_ETH_BOARD_MEDIA_TYPE_RGMII: + if (params.sfp_plus_module_exist == AL_TRUE) + /* Backward compatibility */ + adapter->mac_mode = AL_ETH_MAC_MODE_SGMII; + else + adapter->mac_mode = AL_ETH_MAC_MODE_RGMII; + + adapter->use_lm = false; + break; + case AL_ETH_BOARD_MEDIA_TYPE_SGMII: + adapter->mac_mode = AL_ETH_MAC_MODE_SGMII; + adapter->use_lm = true; + break; + case AL_ETH_BOARD_MEDIA_TYPE_SGMII_2_5G: + adapter->mac_mode = AL_ETH_MAC_MODE_SGMII_2_5G; + adapter->use_lm = false; + break; + case AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR: + adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial; + adapter->use_lm = true; + break; + case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT: + adapter->sfp_detection_needed = AL_TRUE; + adapter->auto_speed = false; + adapter->use_lm = true; + break; + case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED: + adapter->sfp_detection_needed = AL_TRUE; + adapter->auto_speed = true; + adapter->mac_mode_set = false; + adapter->use_lm = true; + break; + case AL_ETH_BOARD_MEDIA_TYPE_NBASE_T: + adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial; + adapter->phy_fixup_needed = true; + break; + default: + dev_err(&adapter->pdev->dev, + "%s: unsupported media type %d\n", __func__, + params.media_type); + return -1; + } + dev_info( + &adapter->pdev->dev, + "Board info: board type %d ext phy exists %s, ext phy addr %x, mdio freq %u Khz, SFP connected %s, media %d\n", + adapter->board_type, + params.phy_exist == AL_TRUE ? "Yes" : "No", + params.phy_mdio_addr, adapter->mdio_freq, + params.sfp_plus_module_exist == AL_TRUE ? 
"Yes" : "No", + params.media_type); + } + + al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr); + + return 0; +} + +static inline void al_eth_flow_ctrl_init(struct al_eth_adapter *adapter) +{ + uint8_t default_flow_ctrl; + + default_flow_ctrl = AL_ETH_FLOW_CTRL_TX_PAUSE; + default_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE; + + adapter->link_config.flow_ctrl_supported = default_flow_ctrl; +} + +static uint8_t al_eth_flow_ctrl_mutual_cap_get(struct al_eth_adapter *adapter) +{ + struct phy_device *phydev = + mdiobus_get_phy(adapter->mdio_bus, adapter->phy_addr); + struct al_eth_link_config *link_config = &adapter->link_config; + uint8_t peer_flow_ctrl = AL_ETH_FLOW_CTRL_AUTONEG; + uint8_t new_flow_ctrl = AL_ETH_FLOW_CTRL_AUTONEG; + + if (phydev->pause) + peer_flow_ctrl |= + (AL_ETH_FLOW_CTRL_TX_PAUSE | AL_ETH_FLOW_CTRL_RX_PAUSE); + if (phydev->asym_pause) + peer_flow_ctrl ^= (AL_ETH_FLOW_CTRL_RX_PAUSE); + + /* + * in autoneg mode, supported flow ctrl is also + * the current advertising + */ + if ((peer_flow_ctrl & AL_ETH_FLOW_CTRL_TX_PAUSE) == + (link_config->flow_ctrl_supported & AL_ETH_FLOW_CTRL_TX_PAUSE)) + new_flow_ctrl |= AL_ETH_FLOW_CTRL_TX_PAUSE; + if ((peer_flow_ctrl & AL_ETH_FLOW_CTRL_RX_PAUSE) == + (link_config->flow_ctrl_supported & AL_ETH_FLOW_CTRL_RX_PAUSE)) + new_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE; + + return new_flow_ctrl; +} + +static int al_eth_flow_ctrl_config(struct al_eth_adapter *adapter) +{ + struct al_eth_flow_control_params *flow_ctrl_params; + uint8_t active = adapter->link_config.flow_ctrl_active; + int i; + + flow_ctrl_params = &adapter->flow_ctrl_params; + + flow_ctrl_params->type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE; + flow_ctrl_params->obay_enable = + ((active & AL_ETH_FLOW_CTRL_RX_PAUSE) != 0); + flow_ctrl_params->gen_enable = + ((active & AL_ETH_FLOW_CTRL_TX_PAUSE) != 0); + + flow_ctrl_params->rx_fifo_th_high = AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH; + flow_ctrl_params->rx_fifo_th_low = AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW; + flow_ctrl_params->quanta = AL_ETH_FLOW_CTRL_QUANTA; + flow_ctrl_params->quanta_th = AL_ETH_FLOW_CTRL_QUANTA_TH; + + /* map priority to queue index, queue id = priority/2 */ + for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++) + flow_ctrl_params->prio_q_map[0][i] = 1 << (i >> 1); + + al_eth_flow_control_config(&adapter->hal_adapter, flow_ctrl_params); + + return 0; +} + +static void al_eth_flow_ctrl_enable(struct al_eth_adapter *adapter) +{ + /* change the active configuration to the default / force by ethtool + * and call to configure */ + adapter->link_config.flow_ctrl_active = + adapter->link_config.flow_ctrl_supported; + + al_eth_flow_ctrl_config(adapter); +} + +static void al_eth_flow_ctrl_disable(struct al_eth_adapter *adapter) +{ + adapter->link_config.flow_ctrl_active = 0; + al_eth_flow_ctrl_config(adapter); +} + +static int al_eth_hw_init_adapter(struct al_eth_adapter *adapter) +{ + struct al_eth_adapter_params *params = &adapter->eth_hal_params; + int rc; + + params->dev_id = adapter->dev_id; + params->rev_id = adapter->rev_id; + params->udma_id = 0; + params->enable_rx_parser = 1; /* enable rx epe parser*/ + params->udma_regs_base = + adapter->udma_base; /* UDMA register base address */ + params->ec_regs_base = + adapter->ec_base; /* Ethernet controller registers base address */ + params->mac_regs_base = + adapter->mac_base; /* Ethernet MAC registers base address */ + params->name = adapter->name; + + rc = al_eth_adapter_init(&adapter->hal_adapter, params); + if (rc) + dev_err(&adapter->pdev->dev, "%s failed at hal init!\n", + 
__func__);
+
+	if ((adapter->board_type == ALPINE_NIC) ||
+	    (adapter->board_type == ALPINE_FPGA_NIC)) {
+		/* in pcie NIC mode, force eth UDMA to access PCIE0 using the vmid */
+		if (adapter->rev_id > AL_ETH_REV_ID_0) {
+			struct al_udma_gen_vmid_conf conf;
+			int i;
+			for (i = 0; i < DMA_MAX_Q; i++) {
+				conf.tx_q_conf[i].queue_en = AL_TRUE;
+				conf.tx_q_conf[i].desc_en = AL_FALSE;
+				conf.tx_q_conf[i].vmid = 0x100; /* for access from PCIE0 */
+				conf.rx_q_conf[i].queue_en = AL_TRUE;
+				conf.rx_q_conf[i].desc_en = AL_FALSE;
+				conf.rx_q_conf[i].vmid = 0x100; /* for access from PCIE0 */
+			}
+			al_udma_gen_vmid_conf_set(adapter->udma_base, &conf);
+		}
+	}
+	return rc;
+}
+
+static int al_eth_hw_init(struct al_eth_adapter *adapter)
+{
+	int rc;
+
+	rc = al_eth_hw_init_adapter(adapter);
+	if (rc)
+		return rc;
+
+	rc = al_eth_mac_config(&adapter->hal_adapter, adapter->mac_mode);
+	if (rc < 0) {
+		dev_err(&adapter->pdev->dev, "%s failed to configure mac!\n",
+			__func__);
+		return rc;
+	}
+
+	if ((adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) ||
+	    (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII &&
+	     adapter->phy_exist == AL_FALSE)) {
+		rc = al_eth_mac_link_config(
+			&adapter->hal_adapter,
+			adapter->link_config.force_1000_base_x,
+			adapter->link_config.autoneg,
+			adapter->link_config.active_speed,
+			adapter->link_config.active_duplex);
+		if (rc) {
+			dev_err(&adapter->pdev->dev,
+				"%s failed to configure link parameters!\n",
+				__func__);
+			return rc;
+		}
+	}
+
+	rc = al_eth_mdio_config(&adapter->hal_adapter,
+				(adapter->phy_if == AL_ETH_BOARD_PHY_IF_XMDIO) ?
+					AL_ETH_MDIO_TYPE_CLAUSE_45 :
+					AL_ETH_MDIO_TYPE_CLAUSE_22,
+				AL_TRUE /*shared_mdio_if*/,
+				adapter->ref_clk_freq, adapter->mdio_freq);
+	if (rc) {
+		dev_err(&adapter->pdev->dev, "%s failed at mdio config!\n",
+			__func__);
+		return rc;
+	}
+
+	al_eth_flow_ctrl_init(adapter);
+
+	return rc;
+}
+
+static int al_eth_udma_queues_disable_all(struct al_eth_adapter *adapter);
+
+static int al_eth_hw_stop(struct al_eth_adapter *adapter)
+{
+	al_eth_mac_stop(&adapter->hal_adapter);
+
+	/* wait till pending rx packets are written and the UDMA becomes idle;
+	 * the MAC has a ~10KB fifo, 10us should be enough time for the
+	 * UDMA to write to the memory
+	 */
+	udelay(10);
+
+	/* disable hw queues */
+	al_eth_udma_queues_disable_all(adapter);
+	al_eth_adapter_stop(&adapter->hal_adapter);
+
+	/* disable flow ctrl to avoid pause packets*/
+	al_eth_flow_ctrl_disable(adapter);
+
+	return 0;
+}
+
+static int al_eth_udma_queue_enable(struct al_eth_adapter *adapter,
+				    enum al_udma_type type, int qid)
+{
+	int rc = 0;
+	char *name = (type == UDMA_TX) ?
"Tx" : "Rx"; + struct al_udma_q_params *q_params; + + if (type == UDMA_TX) + q_params = &adapter->tx_ring[qid].q_params; + else + q_params = &adapter->rx_ring[qid].q_params; + + rc = al_eth_queue_config(&adapter->hal_adapter, type, qid, q_params); + if (rc < 0) { + netdev_err(adapter->netdev, "config %s queue %u failed\n", name, + qid); + return rc; + } + + rc = al_eth_queue_enable(&adapter->hal_adapter, type, qid); + + if (rc < 0) + netdev_err(adapter->netdev, "enable %s queue %u failed\n", name, + qid); + + return rc; +} + +static int al_eth_udma_queues_enable_all(struct al_eth_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + al_eth_udma_queue_enable(adapter, UDMA_TX, i); + + for (i = 0; i < adapter->num_rx_queues; i++) + al_eth_udma_queue_enable(adapter, UDMA_RX, i); + return 0; +} + +static int al_eth_udma_queue_disable(struct al_eth_adapter *adapter, + enum al_udma_type type, int qid) +{ + int rc = 0; + char *name = (type == UDMA_TX) ? "Tx" : "Rx"; + + rc = al_eth_queue_disable(&adapter->hal_adapter, type, qid); + + if (rc < 0) + netdev_err(adapter->netdev, "disable %s queue %u failed\n", + name, qid); + + return rc; +} + +static int al_eth_udma_queues_disable_all(struct al_eth_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + al_eth_udma_queue_disable(adapter, UDMA_TX, i); + + for (i = 0; i < adapter->num_rx_queues; i++) + al_eth_udma_queue_disable(adapter, UDMA_RX, i); + return 0; +} + +static void al_eth_init_rings(struct al_eth_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct al_eth_ring *ring = &adapter->tx_ring[i]; + + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + al_udma_q_handle_get(&adapter->hal_adapter.tx_udma, i, + &ring->dma_q); + ring->sw_count = adapter->tx_ring_count; + ring->hw_count = adapter->tx_descs_count; + ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get( + (struct unit_regs *)adapter->udma_base, + AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C); + ring->unmask_val = ~(1 << i); + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct al_eth_ring *ring = &adapter->rx_ring[i]; + + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + ring->napi = + &adapter->al_napi[AL_ETH_RXQ_NAPI_IDX(adapter, i)].napi; + al_udma_q_handle_get(&adapter->hal_adapter.rx_udma, i, + &ring->dma_q); + ring->sw_count = adapter->rx_ring_count; + ring->hw_count = adapter->rx_descs_count; + ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get( + (struct unit_regs *)adapter->udma_base, + AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B); + ring->unmask_val = ~(1 << i); + } +} + +/** + * al_eth_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: network interface device structure + * @qid: queue index + * + * Return 0 on success, negative on failure + **/ +static int al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid) +{ + struct al_eth_ring *tx_ring = &adapter->tx_ring[qid]; + struct device *dev = tx_ring->dev; + struct al_udma_q_params *q_params = &tx_ring->q_params; + int size; + + size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count; + + tx_ring->tx_buffer_info = kzalloc(size, GFP_KERNEL); + if (!tx_ring->tx_buffer_info) + return -ENOMEM; + + /* TODO: consider ALIGN to page size */ + tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc); + q_params->size = tx_ring->hw_count; + + q_params->desc_base = dma_alloc_coherent( + dev, tx_ring->descs_size, &q_params->desc_phy_base, 
GFP_KERNEL);
+
+	if (!q_params->desc_base)
+		return -ENOMEM;
+
+	q_params->cdesc_base = NULL; /* completion queue not used for tx */
+	q_params->cdesc_size = 8;
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	return 0;
+}
+
+/**
+ * al_eth_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: network interface device structure
+ * @qid: queue index
+ *
+ * Free all transmit software resources
+ **/
+static void al_eth_free_tx_resources(struct al_eth_adapter *adapter, int qid)
+{
+	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
+	struct al_udma_q_params *q_params = &tx_ring->q_params;
+
+	netdev_dbg(adapter->netdev, "%s qid %d\n", __func__, qid);
+
+	kfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!q_params->desc_base)
+		return;
+
+	dma_free_coherent(tx_ring->dev, tx_ring->descs_size,
+			  q_params->desc_base, q_params->desc_phy_base);
+
+	q_params->desc_base = NULL;
+}
+
+/**
+ * al_eth_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int al_eth_setup_all_tx_resources(struct al_eth_adapter *adapter)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		rc = al_eth_setup_tx_resources(adapter, i);
+		if (!rc)
+			continue;
+
+		netdev_err(adapter->netdev,
+			   "Allocation for Tx Queue %u failed\n", i);
+		goto err_setup_tx;
+	}
+
+	return 0;
+err_setup_tx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		al_eth_free_tx_resources(adapter, i);
+	return rc;
+}
+
+/**
+ * al_eth_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void al_eth_free_all_tx_resources(struct al_eth_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		if (adapter->tx_ring[i].q_params.desc_base)
+			al_eth_free_tx_resources(adapter, i);
+}
+
+/**
+ * al_eth_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: network interface device structure
+ * @qid: queue index
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int al_eth_setup_rx_resources(struct al_eth_adapter *adapter,
+				     unsigned int qid)
+{
+	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
+	struct device *dev = rx_ring->dev;
+	struct al_udma_q_params *q_params = &rx_ring->q_params;
+	int size;
+
+	size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count;
+
+	/* alloc extra element so in rx path we can always prefetch rx_info + 1 */
+	size += sizeof(struct al_eth_rx_buffer);
+
+	rx_ring->rx_buffer_info = kzalloc(size, GFP_KERNEL);
+	if (!rx_ring->rx_buffer_info)
+		return -ENOMEM;
+
+	/* TODO: consider round up to nearest 4K */
+	rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc);
+	q_params->size = rx_ring->hw_count;
+
+	q_params->desc_base = dma_alloc_coherent(
+		dev, rx_ring->descs_size, &q_params->desc_phy_base, GFP_KERNEL);
+	if (!q_params->desc_base)
+		return -ENOMEM;
+
+	q_params->cdesc_size = 16;
+	rx_ring->cdescs_size = rx_ring->hw_count * q_params->cdesc_size;
+	q_params->cdesc_base = dma_alloc_coherent(dev, rx_ring->cdescs_size,
+						  &q_params->cdesc_phy_base,
+						  GFP_KERNEL);
+	if (!q_params->cdesc_base)
+		return -ENOMEM;
+
+	/* Zero out the descriptor ring */
+	memset(q_params->cdesc_base, 0, rx_ring->cdescs_size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	return 0;
+}
+
+/**
+ * al_eth_free_rx_resources - Free Rx Resources
+ * @adapter: network interface device
structure + * @qid: queue index + * + * Free all receive software resources + **/ +static void al_eth_free_rx_resources(struct al_eth_adapter *adapter, + unsigned int qid) +{ + struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; + struct al_udma_q_params *q_params = &rx_ring->q_params; + + kfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!q_params->desc_base) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->descs_size, + q_params->desc_base, q_params->desc_phy_base); + + q_params->desc_base = NULL; + + /* if not set, then don't free */ + if (!q_params->cdesc_base) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->cdescs_size, + q_params->cdesc_base, q_params->cdesc_phy_base); + + q_params->cdesc_phy_base = 0; +} + +/** + * al_eth_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int al_eth_setup_all_rx_resources(struct al_eth_adapter *adapter) +{ + int i, rc = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + rc = al_eth_setup_rx_resources(adapter, i); + if (!rc) + continue; + + netdev_err(adapter->netdev, + "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + return 0; + +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + al_eth_free_rx_resources(adapter, i); + return rc; +} + +/** + * al_eth_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void al_eth_free_all_rx_resources(struct al_eth_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i].q_params.desc_base) + al_eth_free_rx_resources(adapter, i); +} + +#ifdef CONFIG_AL_ETH_ALLOC_PAGE +static inline int al_eth_alloc_rx_page(struct al_eth_adapter *adapter, + struct al_eth_rx_buffer *rx_info, + gfp_t gfp) +{ + struct al_buf *al_buf; + struct page *page; + dma_addr_t dma; + + /* if previous allocated page is not used */ + if (rx_info->page != NULL) + return 0; + + page = alloc_page(gfp); + if (unlikely(!page)) + return -ENOMEM; + + dma = dma_map_page(&adapter->pdev->dev, page, 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&adapter->pdev->dev, dma))) { + __free_page(page); + return -EIO; + } + dev_dbg(&adapter->pdev->dev, "alloc page %p, rx_info %p\n", page, + rx_info); + + rx_info->page = page; + rx_info->page_offset = 0; + al_buf = &rx_info->al_buf; + dma_unmap_addr_set(al_buf, addr, dma); + dma_unmap_addr_set(rx_info, dma, dma); + dma_unmap_len_set(al_buf, len, PAGE_SIZE); + return 0; +} + +static void al_eth_free_rx_page(struct al_eth_adapter *adapter, + struct al_eth_rx_buffer *rx_info) +{ + struct page *page = rx_info->page; + struct al_buf *al_buf = &rx_info->al_buf; + + if (!page) + return; + + dma_unmap_page(&adapter->pdev->dev, dma_unmap_addr(al_buf, addr), + PAGE_SIZE, DMA_FROM_DEVICE); + + __free_page(page); + rx_info->page = NULL; +} + +#elif defined(CONFIG_AL_ETH_ALLOC_FRAG) + +static inline int al_eth_alloc_rx_frag(struct al_eth_adapter *adapter, + struct al_eth_ring *rx_ring, + struct al_eth_rx_buffer *rx_info) +{ + struct al_buf *al_buf; + dma_addr_t dma; + u8 *data; + + /* if previous allocated frag is not used */ + if (rx_info->data != NULL) + return 0; + + rx_info->data_size = min_t( + unsigned int, + (rx_ring->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN), + adapter->max_rx_buff_alloc_size); + + 
rx_info->data_size = max_t(unsigned int, rx_info->data_size,
+				   AL_ETH_DEFAULT_MIN_RX_BUFF_ALLOC_SIZE);
+
+	rx_info->frag_size =
+		SKB_DATA_ALIGN(rx_info->data_size + AL_ETH_RX_OFFSET) +
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	data = netdev_alloc_frag(rx_info->frag_size);
+
+	if (!data)
+		return -ENOMEM;
+
+	dma = dma_map_single(rx_ring->dev, data + AL_ETH_RX_OFFSET,
+			     rx_info->data_size, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
+		put_page(virt_to_head_page(data));
+		return -EIO;
+	}
+	netdev_dbg(rx_ring->netdev,
+		   "alloc frag %p, rx_info %p len %x skb size %x\n", data,
+		   rx_info, rx_info->data_size, rx_info->frag_size);
+
+	rx_info->data = data;
+
+	BUG_ON(!virt_addr_valid(rx_info->data));
+	rx_info->page = virt_to_head_page(rx_info->data);
+	rx_info->page_offset = (uintptr_t)rx_info->data -
+			       (uintptr_t)page_address(rx_info->page);
+	al_buf = &rx_info->al_buf;
+	dma_unmap_addr_set(al_buf, addr, dma);
+	dma_unmap_addr_set(rx_info, dma, dma);
+	dma_unmap_len_set(al_buf, len, rx_info->data_size);
+	return 0;
+}
+
+static void al_eth_free_rx_frag(struct al_eth_adapter *adapter,
+				struct al_eth_rx_buffer *rx_info)
+{
+	u8 *data = rx_info->data;
+	struct al_buf *al_buf = &rx_info->al_buf;
+
+	if (!data)
+		return;
+
+	dma_unmap_single(&adapter->pdev->dev, dma_unmap_addr(al_buf, addr),
+			 rx_info->data_size, DMA_FROM_DEVICE);
+
+	put_page(virt_to_head_page(data));
+	rx_info->data = NULL;
+}
+
+#elif defined(CONFIG_AL_ETH_ALLOC_SKB)
+
+static inline int al_eth_alloc_rx_skb(struct al_eth_adapter *adapter,
+				      struct al_eth_ring *rx_ring,
+				      struct al_eth_rx_buffer *rx_info)
+{
+	struct sk_buff *skb;
+	struct al_buf *al_buf;
+	dma_addr_t dma;
+	struct sk_buff_head *rx_rc = this_cpu_ptr(&rx_recycle_cache);
+
+	if (rx_info->skb)
+		return 0;
+
+	rx_info->data_size =
+		rx_ring->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+	rx_info->data_size = max_t(unsigned int, rx_info->data_size,
+				   AL_ETH_DEFAULT_MIN_RX_BUFF_ALLOC_SIZE);
+
+	skb = __skb_dequeue(rx_rc);
+	if (skb == NULL)
+		skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
+						  rx_info->data_size, GFP_DMA);
+
+	if (!skb)
+		return -ENOMEM;
+
+	dma = dma_map_single(rx_ring->dev, skb->data + AL_ETH_RX_OFFSET,
+			     rx_info->data_size, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(rx_ring->dev, dma)))
+		return -EIO;
+
+	rx_info->data = skb->data;
+	rx_info->skb = skb;
+
+	BUG_ON(!virt_addr_valid(rx_info->data));
+	al_buf = &rx_info->al_buf;
+	dma_unmap_addr_set(al_buf, addr, dma);
+	dma_unmap_addr_set(rx_info, dma, dma);
+	dma_unmap_len_set(al_buf, len, rx_info->data_size);
+	return 0;
+}
+
+static void al_eth_free_rx_skb(struct al_eth_adapter *adapter,
+			       struct al_eth_rx_buffer *rx_info)
+{
+	struct al_buf *al_buf = &rx_info->al_buf;
+
+	if (!rx_info->skb)
+		return;
+
+	dma_unmap_single(&adapter->pdev->dev, dma_unmap_addr(al_buf, addr),
+			 rx_info->data_size, DMA_FROM_DEVICE);
+	dev_kfree_skb_any(rx_info->skb);
+	rx_info->skb = NULL;
+}
+#endif
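+
+/*
+ * A minimal sketch (not part of the original patch) of the index arithmetic
+ * assumed behind AL_ETH_RX_RING_IDX_NEXT()/AL_ETH_RX_RING_IDX_ADD() used by
+ * the refill loop below: if the software ring size is a power of two, a
+ * wrap-around is a mask, never a division.
+ */
+static inline u16 al_eth_example_rx_idx_add(u16 idx, u16 n, u16 ring_size)
+{
+	/* illustrative only; ring_size is assumed to be a power of two */
+	return (idx + n) & (ring_size - 1);
+}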
+
+static int al_eth_refill_rx_bufs(struct al_eth_adapter *adapter,
+				 unsigned int qid, unsigned int num)
+{
+	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
+	u16 next_to_use;
+	unsigned int i;
+
+	next_to_use = rx_ring->next_to_use;
+
+	for (i = 0; i < num; i++) {
+		int rc;
+		struct al_eth_rx_buffer *rx_info =
+			&rx_ring->rx_buffer_info[next_to_use];
+
+#ifdef CONFIG_AL_ETH_ALLOC_PAGE
+		if (unlikely(al_eth_alloc_rx_page(adapter, rx_info,
+						  GFP_ATOMIC |
+						  __GFP_COMP) < 0)) {
+#elif defined(CONFIG_AL_ETH_ALLOC_FRAG)
+		if (unlikely(al_eth_alloc_rx_frag(adapter, rx_ring, rx_info) <
+			     0)) {
+#elif defined(CONFIG_AL_ETH_ALLOC_SKB)
+		if (unlikely(al_eth_alloc_rx_skb(adapter, rx_ring, rx_info) <
+			     0)) {
+#endif
+			netdev_warn(adapter->netdev,
+				    "failed to alloc buffer for rx queue %d\n",
+				    qid);
+			break;
+		}
+		rc = al_eth_rx_buffer_add(rx_ring->dma_q, &rx_info->al_buf,
+					  AL_ETH_RX_FLAGS_INT, NULL);
+		if (unlikely(rc)) {
+			netdev_warn(adapter->netdev,
+				    "failed to add buffer for rx queue %d\n",
+				    qid);
+			break;
+		}
+		next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use);
+	}
+
+	if (unlikely(i < num)) {
+		netdev_warn(
+			adapter->netdev,
+			"refilled rx queue %d with %d buffers only - available %d\n",
+			qid, i, al_udma_available_get(rx_ring->dma_q));
+	}
+
+	if (likely(i))
+		al_eth_rx_buffer_action(rx_ring->dma_q, i);
+
+	rx_ring->next_to_use = next_to_use;
+
+	return i;
+}
+
+static void al_eth_free_rx_bufs(struct al_eth_adapter *adapter,
+				unsigned int qid)
+{
+	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
+	unsigned int i;
+
+	for (i = 0; i < AL_ETH_DEFAULT_RX_DESCS; i++) {
+		struct al_eth_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
+
+#ifdef CONFIG_AL_ETH_ALLOC_PAGE
+		if (rx_info->page)
+			al_eth_free_rx_page(adapter, rx_info);
+#elif defined(CONFIG_AL_ETH_ALLOC_FRAG)
+		if (rx_info->data)
+			al_eth_free_rx_frag(adapter, rx_info);
+#elif defined(CONFIG_AL_ETH_ALLOC_SKB)
+		if (rx_info->skb)
+			al_eth_free_rx_skb(adapter, rx_info);
+#endif
+	}
+}
+
+/**
+ * al_eth_refill_all_rx_bufs - allocate all queues Rx buffers
+ * @adapter: board private structure
+ *
+ **/
+static void al_eth_refill_all_rx_bufs(struct al_eth_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		al_eth_refill_rx_bufs(adapter, i, AL_ETH_DEFAULT_RX_DESCS - 1);
+}
+
+static void al_eth_free_all_rx_bufs(struct al_eth_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		al_eth_free_rx_bufs(adapter, i);
+}
+
+/**
+ * al_eth_free_tx_bufs - Free Tx Buffers per Queue
+ * @adapter: network interface device structure
+ * @qid: queue index
+ **/
+static void al_eth_free_tx_bufs(struct al_eth_adapter *adapter,
+				unsigned int qid)
+{
+	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
+	unsigned int i;
+	bool udma_debug_printed = false;
+
+	for (i = 0; i < AL_ETH_DEFAULT_TX_SW_DESCS; i++) {
+		struct al_eth_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
+		struct al_buf *al_buf;
+		int nr_frags;
+		int j;
+
+		if (tx_info->skb == NULL)
+			continue;
+
+		if (!udma_debug_printed) {
+			al_udma_regs_print(tx_ring->dma_q->udma,
+					   AL_UDMA_DEBUG_QUEUE(qid));
+			al_udma_q_struct_print(tx_ring->dma_q->udma, qid);
+			udma_debug_printed = true;
+		}
+		netdev_warn(adapter->netdev,
+			    "free uncompleted tx skb qid %d idx 0x%x\n", qid,
+			    i);
+
+		al_buf = tx_info->hal_pkt.bufs;
+		dma_unmap_single(&adapter->pdev->dev,
+				 dma_unmap_addr(al_buf, addr),
+				 dma_unmap_len(al_buf, len), DMA_TO_DEVICE);
+
+		/* unmap remaining mapped pages */
+		nr_frags = tx_info->hal_pkt.num_of_bufs - 1;
+		for (j = 0; j < nr_frags; j++) {
+			al_buf++;
+			dma_unmap_page(&adapter->pdev->dev,
+				       dma_unmap_addr(al_buf, addr),
+				       dma_unmap_len(al_buf, len),
+				       DMA_TO_DEVICE);
+		}
+
+		dev_kfree_skb_any(tx_info->skb);
+	}
+	netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, qid));
+}
+
+static void al_eth_free_all_tx_bufs(struct al_eth_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		al_eth_free_tx_bufs(adapter, i);
+}
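+
+/*
+ * For reference, a minimal sketch of the byte queue limit (BQL) pairing
+ * assumed by this driver: the xmit path is expected to report queued bytes
+ * with netdev_tx_sent_queue(), al_eth_tx_poll() reports completions with
+ * netdev_tx_completed_queue(), and al_eth_free_tx_bufs() above calls
+ * netdev_tx_reset_queue() because it frees skbs without completing them;
+ * skipping that reset would leave BQL accounting phantom in-flight bytes.
+ */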
+
+#ifdef CONFIG_AL_ETH_ALLOC_SKB
+/* the following 3 functions taken from old kernels */
+static bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
+{
+	if (irqs_disabled())
+		return false;
+
+	if (skb_zcopy(skb))
+		return false;
+
+	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+		return false;
+
+	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+	if (skb_end_offset(skb) < skb_size)
+		return false;
+
+	if (skb_shared(skb) || skb_cloned(skb))
+		return false;
+
+	return true;
+}
+
+/**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+
+	skb_release_head_state(skb);
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->data = skb->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(skb);
+}
+
+/**
+ * skb_recycle_check - check if skb can be reused for receive
+ * @skb: buffer
+ * @skb_size: minimum receive buffer size
+ *
+ * Checks that the skb passed in is not shared or cloned, and
+ * that it is linear and its head portion at least as large as
+ * skb_size so that it can be recycled as a receive buffer.
+ * If these conditions are met, this function does any necessary
+ * reference count dropping and cleans up the skbuff as if it
+ * just came from __alloc_skb().
+ */
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
+{
+	if (!skb_is_recycleable(skb, skb_size))
+		return false;
+
+	skb_recycle(skb);
+
+	return true;
+}
+#endif
+
+/**
+ * al_eth_tx_poll - NAPI Tx polling callback
+ * @napi: structure for representing this polling device
+ * @budget: how many packets driver is allowed to clean
+ *
+ * This function is used in NAPI mode, for both legacy and MSI interrupts
+ **/
+static int al_eth_tx_poll(struct napi_struct *napi, int budget)
+{
+	struct al_eth_napi *al_napi =
+		container_of(napi, struct al_eth_napi, napi);
+	struct al_eth_adapter *adapter = al_napi->adapter;
+	unsigned int qid = al_napi->qid;
+	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
+	struct netdev_queue *txq;
+	unsigned int tx_bytes = 0;
+	unsigned int total_done;
+	u16 next_to_clean;
+	int tx_pkt = 0;
+#ifdef CONFIG_AL_ETH_ALLOC_SKB
+	struct sk_buff_head *rx_rc = this_cpu_ptr(&rx_recycle_cache);
+#endif
+	total_done = al_eth_comp_tx_get(tx_ring->dma_q);
+	dev_dbg(&adapter->pdev->dev, "tx_poll: q %d total completed descs %x\n",
+		qid, total_done);
+	next_to_clean = tx_ring->next_to_clean;
+	txq = netdev_get_tx_queue(adapter->netdev, qid);
+
+	while (total_done) {
+		struct al_eth_tx_buffer *tx_info;
+		struct sk_buff *skb;
+		struct al_buf *al_buf;
+		int i, nr_frags;
+
+		tx_info = &tx_ring->tx_buffer_info[next_to_clean];
+		/* stop if not all descriptors of the packet are completed */
+		if (tx_info->tx_descs > total_done)
+			break;
+
+		skb = tx_info->skb;
+
+		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
+		prefetch(&skb->end);
+
+		tx_info->skb = NULL;
+		al_buf = tx_info->hal_pkt.bufs;
+		dma_unmap_single(tx_ring->dev, dma_unmap_addr(al_buf, addr),
+				 dma_unmap_len(al_buf, len), DMA_TO_DEVICE);
+
+		/* unmap remaining mapped pages */
+		nr_frags = tx_info->hal_pkt.num_of_bufs - 1;
+		for (i = 0; i < nr_frags; i++) {
+			al_buf++;
+			dma_unmap_page(tx_ring->dev,
+				       dma_unmap_addr(al_buf, addr),
+				       dma_unmap_len(al_buf, len),
DMA_TO_DEVICE); + } + + tx_bytes += skb->len; + dev_dbg(&adapter->pdev->dev, "tx_poll: q %d skb %p completed\n", + qid, skb); +#ifdef CONFIG_AL_ETH_ALLOC_SKB + if ((skb_queue_len(rx_rc) < AL_ETH_DEFAULT_RX_DESCS) && + skb_recycle_check(skb, tx_ring->netdev->mtu + ETH_HLEN + + ETH_FCS_LEN + VLAN_HLEN)) + __skb_queue_head(rx_rc, skb); + else +#endif + dev_kfree_skb(skb); + tx_pkt++; + total_done -= tx_info->tx_descs; + next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean); + } + + netdev_tx_completed_queue(txq, tx_pkt, tx_bytes); + + tx_ring->next_to_clean = next_to_clean; + + dev_dbg(&adapter->pdev->dev, "tx_poll: q %d done next to clean %x\n", + qid, next_to_clean); + + /* need to make the rings circular update visible to + * al_eth_start_xmit() before checking for netif_queue_stopped(). + */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(txq) && + (al_udma_available_get(tx_ring->dma_q) > + AL_ETH_TX_WAKEUP_THRESH))) { + __netif_tx_lock(txq, smp_processor_id()); + if (netif_tx_queue_stopped(txq) && + (al_udma_available_get(tx_ring->dma_q) > + AL_ETH_TX_WAKEUP_THRESH)) + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); + } + + /* all work done, exit the polling mode */ + napi_complete(napi); + al_reg_write32_relaxed(tx_ring->unmask_reg_offset, tx_ring->unmask_val); + return 0; +} +#ifdef CONFIG_AL_ETH_ALLOC_PAGE +static struct sk_buff *al_eth_rx_skb(struct al_eth_adapter *adapter, + struct al_eth_ring *rx_ring, + struct al_eth_pkt *hal_pkt, + unsigned int descs, u16 *next_to_clean) +{ + struct sk_buff *skb; + struct al_eth_rx_buffer *rx_info = + &rx_ring->rx_buffer_info[*next_to_clean]; + struct page *page = rx_info->page; + unsigned int len; + unsigned int buf = 0; + void *va; + + skb = netdev_alloc_skb_ip_align(adapter->netdev, + adapter->small_copy_len); + if (!skb) { + /*rx_ring->rx_stats.alloc_rx_buff_failed++;*/ + netdev_dbg(adapter->netdev, "Failed allocating skb\n"); + return NULL; + } + + netdev_dbg(adapter->netdev, "rx skb allocated. len %d. data_len %d\n", + skb->len, skb->data_len); + + len = hal_pkt->bufs[0].len; + dev_dbg(&adapter->pdev->dev, "rx_info %p page %p\n", rx_info, + rx_info->page); + + page = rx_info->page; + /* save virt address of first buffer */ + va = page_address(rx_info->page) + rx_info->page_offset; + prefetch(va + AL_ETH_RX_OFFSET); + + if (len <= adapter->small_copy_len) { + netdev_dbg(adapter->netdev, "rx small packet. len %d\n", len); + /* sync this buffer for CPU use */ + dma_sync_single_for_cpu(rx_ring->dev, rx_info->dma, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, va, len); + dma_sync_single_for_device(rx_ring->dev, rx_info->dma, len, + DMA_FROM_DEVICE); + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, adapter->netdev); + *next_to_clean = + AL_ETH_RX_RING_IDX_ADD(rx_ring, *next_to_clean, descs); + return skb; + } + + do { + dma_unmap_page(rx_ring->dev, dma_unmap_addr(rx_info, dma), + PAGE_SIZE, DMA_FROM_DEVICE); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, + rx_info->page_offset, len, PAGE_SIZE); + + netdev_dbg(adapter->netdev, + "rx skb updated. len %d. 
data_len %d\n", skb->len, + skb->data_len); + + rx_info->page = NULL; + *next_to_clean = + AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); + if (likely(--descs == 0)) + break; + rx_info = &rx_ring->rx_buffer_info[*next_to_clean]; + len = hal_pkt->bufs[++buf].len; + } while (1); + + /* Copy headers into the skb linear buffer */ + skb_copy_to_linear_data(skb, va, AL_ETH_HEADER_COPY_SIZE); + skb->tail += AL_ETH_HEADER_COPY_SIZE; + + /* Skip headers in first fragment */ + skb_shinfo(skb)->frags[0].page_offset += AL_ETH_HEADER_COPY_SIZE; + + /* Adjust size of first fragment */ + skb_frag_size_sub(&skb_shinfo(skb)->frags[0], AL_ETH_HEADER_COPY_SIZE); + skb->data_len -= AL_ETH_HEADER_COPY_SIZE; + skb->protocol = eth_type_trans(skb, adapter->netdev); + return skb; +} +#elif defined(CONFIG_AL_ETH_ALLOC_FRAG) +static struct sk_buff *al_eth_rx_skb(struct al_eth_adapter *adapter, + struct al_eth_ring *rx_ring, + struct al_eth_pkt *hal_pkt, + unsigned int descs, u16 *next_to_clean) +{ + struct sk_buff *skb; + struct al_eth_rx_buffer *rx_info = + &rx_ring->rx_buffer_info[*next_to_clean]; + unsigned int len; + unsigned int buf = 0; + + len = hal_pkt->bufs[0].len; + netdev_dbg(adapter->netdev, "rx_info %p data %p\n", rx_info, + rx_info->data); + + prefetch(rx_info->data + AL_ETH_RX_OFFSET); + + if (len <= adapter->small_copy_len) { + netdev_dbg(adapter->netdev, "rx small packet. len %d\n", len); + + skb = netdev_alloc_skb_ip_align(adapter->netdev, + adapter->small_copy_len); + if (unlikely(!skb)) + return NULL; + + dma_sync_single_for_cpu(&adapter->pdev->dev, rx_info->dma, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, rx_info->data + AL_ETH_RX_OFFSET, + len); + dma_sync_single_for_device(&adapter->pdev->dev, rx_info->dma, + len, DMA_FROM_DEVICE); + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, adapter->netdev); + *next_to_clean = + AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); + return skb; + } + + dma_unmap_single(rx_ring->dev, dma_unmap_addr(rx_info, dma), + rx_info->data_size, DMA_FROM_DEVICE); +#if 0 + skb = build_skb(rx_info->data, rx_ring->frag_size); + if (unlikely(!skb)) + return NULL; +#else + skb = napi_get_frags(rx_ring->napi); + if (unlikely(!skb)) + return NULL; + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, rx_info->page, + rx_info->page_offset + AL_ETH_RX_OFFSET, len); + + skb->len += len; + skb->data_len += len; + skb->truesize += len; +#endif +#if 0 + skb_reserve(skb, AL_ETH_RX_OFFSET); + skb_put(skb, len); +#endif + netdev_dbg(adapter->netdev, "rx skb updated. len %d. data_len %d\n", + skb->len, skb->data_len); + + rx_info->data = NULL; + *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); + + while (--descs) { + rx_info = &rx_ring->rx_buffer_info[*next_to_clean]; + len = hal_pkt->bufs[++buf].len; + + dma_unmap_single(rx_ring->dev, dma_unmap_addr(rx_info, dma), + rx_info->data_size, DMA_FROM_DEVICE); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, + rx_info->page_offset + AL_ETH_RX_OFFSET, len, + rx_info->data_size); + + netdev_dbg(adapter->netdev, + "rx skb updated. len %d. 
" + "data_len %d\n", + skb->len, skb->data_len); + + rx_info->data = NULL; + + *next_to_clean = + AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); + } + + return skb; +} +#elif defined(CONFIG_AL_ETH_ALLOC_SKB) +static struct sk_buff *al_eth_rx_skb(struct al_eth_adapter *adapter, + struct al_eth_ring *rx_ring, + struct al_eth_pkt *hal_pkt, + unsigned int descs, u16 *next_to_clean) +{ + struct sk_buff *skb; + struct al_eth_rx_buffer *rx_info = + &rx_ring->rx_buffer_info[*next_to_clean]; + unsigned int len; + + prefetch(rx_info->data + AL_ETH_RX_OFFSET); + skb = rx_info->skb; + prefetch(skb); + prefetch(&skb->end); + prefetch(&skb->dev); + + len = hal_pkt->bufs[0].len; + + dma_unmap_single(rx_ring->dev, dma_unmap_addr(rx_info, dma), + rx_info->data_size, DMA_FROM_DEVICE); + + skb_reserve(skb, AL_ETH_RX_OFFSET); + skb_put(skb, len); + + skb->protocol = eth_type_trans(skb, adapter->netdev); + rx_info->skb = NULL; + *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); + /* prefetch next packet */ + prefetch((rx_info + 1)->data + AL_ETH_RX_OFFSET); + prefetch((rx_info + 1)->skb); + + return skb; +} +#endif + +/** + * al_eth_rx_checksum - indicate in skb if hw indicated a good cksum + * @adapter: structure containing adapter specific data + * @hal_pkt: HAL structure for the packet + * @skb: skb currently being received and modified + **/ +static inline void al_eth_rx_checksum(struct al_eth_adapter *adapter, + struct al_eth_pkt *hal_pkt, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Rx csum disabled */ + if (unlikely(!(adapter->netdev->features & NETIF_F_RXCSUM))) { + netdev_dbg(adapter->netdev, + "hw checksum offloading disabled\n"); + return; + } + + /* if IP and error */ + if (unlikely((hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv4) && + (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) { + /* ipv4 checksum error */ + netdev_dbg(adapter->netdev, "rx ipv4 header checksum error\n"); + return; + } + + /* if TCP/UDP */ + if (likely((hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) || + (hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_UDP))) { + /* TODO: check if we need the test above for TCP/UDP */ + if (unlikely(hal_pkt->flags & AL_ETH_RX_FLAGS_L4_CSUM_ERR)) { + /* TCP/UDP checksum error */ + netdev_dbg(adapter->netdev, "rx L4 checksum error\n"); + return; + } else { + netdev_dbg(adapter->netdev, "rx checksum correct\n"); + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + } +} + +/** + * al_eth_rx_poll - NAPI Rx polling callback + * @napi: structure for representing this polling device + * @budget: how many packets driver is allowed to clean + * + * This function is used for legacy and MSI, NAPI mode + **/ +static int al_eth_rx_poll(struct napi_struct *napi, int budget) +{ + struct al_eth_napi *al_napi = + container_of(napi, struct al_eth_napi, napi); + struct al_eth_adapter *adapter = al_napi->adapter; + unsigned int qid = al_napi->qid; + struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; + struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt; + int work_done = 0; + u16 next_to_clean = rx_ring->next_to_clean; + int refill_required; + int refill_actual; + + netdev_dbg(adapter->netdev, "%s qid %d\n", __func__, qid); + + do { + struct sk_buff *skb; + unsigned int descs; + + descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt); + if (unlikely(descs == 0)) + break; + + netdev_dbg(adapter->netdev, + "rx_poll: q %d got packet from hal. descs %d\n", qid, + descs); + netdev_dbg(adapter->netdev, + "rx_poll: q %d flags %x. 
l3 proto %d l4 proto %d\n",
+			   qid, hal_pkt->flags, hal_pkt->l3_proto_idx,
+			   hal_pkt->l4_proto_idx);
+
+		/* ignore if detected dma or eth controller errors */
+		if (hal_pkt->flags & (AL_ETH_RX_ERROR | AL_UDMA_CDESC_ERROR)) {
+			netdev_dbg(adapter->netdev,
+				   "receive packet with error. flags = 0x%x\n",
+				   hal_pkt->flags);
+			next_to_clean = AL_ETH_RX_RING_IDX_ADD(
+				rx_ring, next_to_clean, descs);
+			goto next;
+		}
+
+		/* allocate skb and fill it */
+		skb = al_eth_rx_skb(adapter, rx_ring, hal_pkt, descs,
+				    &next_to_clean);
+
+		/* exit if we failed to retrieve a buffer */
+		if (unlikely(!skb)) {
+			next_to_clean = AL_ETH_RX_RING_IDX_ADD(
+				rx_ring, next_to_clean, descs);
+			break;
+		}
+
+		al_eth_rx_checksum(adapter, hal_pkt, skb);
+		if (likely(adapter->netdev->features & NETIF_F_RXHASH)) {
+			skb->hash = hal_pkt->rxhash;
+			if (likely((hal_pkt->l4_proto_idx ==
+				    AL_ETH_PROTO_ID_TCP) ||
+				   (hal_pkt->l4_proto_idx ==
+				    AL_ETH_PROTO_ID_UDP)))
+				skb->l4_hash = 1;
+		}
+
+		skb_record_rx_queue(skb, qid);
+
+#ifdef CONFIG_AL_ETH_ALLOC_SKB
+		netif_receive_skb(skb);
+#else
+		if (hal_pkt->bufs[0].len <= adapter->small_copy_len)
+			napi_gro_receive(napi, skb);
+		else
+			napi_gro_frags(napi);
+#endif
+
+next:
+		budget--;
+		work_done++;
+	} while (likely(budget));
+
+	rx_ring->next_to_clean = next_to_clean;
+
+	refill_required = al_udma_available_get(rx_ring->dma_q);
+	refill_actual = al_eth_refill_rx_bufs(adapter, qid, refill_required);
+
+	if (unlikely(refill_actual < refill_required)) {
+		netdev_warn(adapter->netdev, "%s: rescheduling rx queue %d\n",
+			    __func__, qid);
+		napi_reschedule(napi);
+	} else if (budget > 0) {
+		dev_dbg(&adapter->pdev->dev,
+			"rx_poll: q %d done next to clean %x\n", qid,
+			next_to_clean);
+		napi_complete(napi);
+		al_reg_write32_relaxed(rx_ring->unmask_reg_offset,
+				       rx_ring->unmask_val);
+	}
+
+	return work_done;
+}
+
+/**
+ * al_eth_intr_intx_all - Legacy Interrupt Handler for all interrupts
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t al_eth_intr_intx_all(int irq, void *data)
+{
+	struct al_eth_adapter *adapter = data;
+	struct unit_regs __iomem *regs_base =
+		(struct unit_regs __iomem *)adapter->udma_base;
+	uint32_t reg;
+
+	reg = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
+				       AL_INT_GROUP_A);
+	if (likely(reg))
+		pr_debug("%s group A cause %x\n", __func__, reg);
+
+	if (unlikely(reg & AL_INT_GROUP_A_GROUP_D_SUM)) {
+		struct al_iofic_grp_ctrl __iomem *sec_ints_base;
+		uint32_t cause_d = al_udma_iofic_read_cause(
+			regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D);
+
+		sec_ints_base =
+			&regs_base->gen.interrupt_regs.secondary_iofic_ctrl[0];
+		if (cause_d) {
+			pr_debug("got interrupt from group D. 
cause %x\n", + cause_d); + + cause_d = al_iofic_read_cause(sec_ints_base, + AL_INT_GROUP_A); + pr_debug("secondary A cause %x\n", cause_d); + + cause_d = al_iofic_read_cause(sec_ints_base, + AL_INT_GROUP_B); + + pr_debug("secondary B cause %x\n", cause_d); + } + } + if (reg & AL_INT_GROUP_A_GROUP_B_SUM) { + uint32_t cause_b = al_udma_iofic_read_cause( + regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B); + int qid; + for (qid = 0; qid < adapter->num_rx_queues; qid++) { + if (cause_b & (1 << qid)) { + /* mask */ + al_udma_iofic_mask((struct unit_regs __iomem *) + adapter->udma_base, + AL_UDMA_IOFIC_LEVEL_PRIMARY, + AL_INT_GROUP_B, 1 << qid); + + napi_schedule( + &adapter->al_napi[AL_ETH_RXQ_NAPI_IDX( + adapter, qid)] + .napi); + } + } + } + if (reg & AL_INT_GROUP_A_GROUP_C_SUM) { + uint32_t cause_c = al_udma_iofic_read_cause( + regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C); + int qid; + for (qid = 0; qid < adapter->num_tx_queues; qid++) { + if (cause_c & (1 << qid)) { + /* mask */ + al_udma_iofic_mask((struct unit_regs __iomem *) + adapter->udma_base, + AL_UDMA_IOFIC_LEVEL_PRIMARY, + AL_INT_GROUP_C, 1 << qid); + + napi_schedule( + &adapter->al_napi[AL_ETH_TXQ_NAPI_IDX( + adapter, qid)] + .napi); + } + } + } + + return IRQ_HANDLED; +} + +/** + * al_eth_intr_msix_all - MSIX Interrupt Handler for all interrupts + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t al_eth_intr_msix_all(int irq, void *data) +{ + return IRQ_HANDLED; +} + +/** + * al_eth_intr_msix_mgmt - MSIX Interrupt Handler for Management interrupts + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t al_eth_intr_msix_mgmt(int irq, void *data) +{ + pr_info("got mgmt interrupt\n"); + return IRQ_HANDLED; +} + +/** + * al_eth_intr_msix_tx - MSIX Interrupt Handler for Tx + * @irq: interrupt number + * @data: pointer to a network interface private napi device structure + **/ +static irqreturn_t al_eth_intr_msix_tx(int irq, void *data) +{ + struct al_eth_napi *al_napi = data; + + pr_debug("%s\n", __func__); + napi_schedule(&al_napi->napi); + + return IRQ_HANDLED; +} + +/** + * al_eth_intr_msix_rx - MSIX Interrupt Handler for Rx + * @irq: interrupt number + * @data: pointer to a network interface private napi device structure + **/ +static irqreturn_t al_eth_intr_msix_rx(int irq, void *data) +{ + struct al_eth_napi *al_napi = data; + + pr_debug("%s\n", __func__); + napi_schedule(&al_napi->napi); + return IRQ_HANDLED; +} + +static void al_eth_enable_msix(struct al_eth_adapter *adapter) +{ + int i, msix_vecs, rc; + + msix_vecs = 1 + adapter->num_rx_queues + adapter->num_tx_queues; + + dev_dbg(&adapter->pdev->dev, "Try to enable MSIX, vectors %d\n", + msix_vecs); + + adapter->msix_entries = + kcalloc(msix_vecs, sizeof(struct msix_entry), GFP_KERNEL); + + if (!adapter->msix_entries) { + dev_err(&adapter->pdev->dev, + "failed to allocate msix_entries, vectors %d\n", + msix_vecs); + + return; + } + + /* management vector (GROUP_A) @2*/ + adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2; + adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0; + + /* rx queues start @3 */ + for (i = 0; i < adapter->num_rx_queues; i++) { + int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i); + + adapter->msix_entries[irq_idx].entry = 3 + i; + adapter->msix_entries[irq_idx].vector = 0; + } + /* tx queues start @7 */ + for (i = 0; i < adapter->num_tx_queues; i++) { + int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i); + + 
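		/* tx vectors sit after a full AL_ETH_MAX_HW_QUEUES rx block,
		 * so their MSI-X table entries do not depend on how many rx
		 * queues are actually configured
		 */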
adapter->msix_entries[irq_idx].entry = + 3 + AL_ETH_MAX_HW_QUEUES + i; + adapter->msix_entries[irq_idx].vector = 0; + } + + rc = -ENOSPC; + while (msix_vecs >= 1) { + rc = pci_enable_msix_exact(adapter->pdev, adapter->msix_entries, + msix_vecs); + if (rc <= 0) + break; + if (rc > 0) + msix_vecs = + 1; /* if we can't allocate all, then try only 1; */ + } + + if (rc != 0) { + dev_dbg(&adapter->pdev->dev, + "failed to enable MSIX, vectors %d\n", msix_vecs); + adapter->msix_vecs = 0; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + dev_dbg(&adapter->pdev->dev, "%s %d\n", __func__, __LINE__); + + return; + } + dev_dbg(&adapter->pdev->dev, "enable MSIX, vectors %d\n", msix_vecs); + + /* enable MSIX in the msix capability of the eth controller + * as the pci_enable_msix_exact enables it in the pcie unit capability + */ + if ((adapter->board_type == ALPINE_NIC) || + (adapter->board_type == ALPINE_FPGA_NIC)) + writew(PCI_MSIX_FLAGS_ENABLE, + adapter->internal_pcie_base + 0x92); + + if (msix_vecs >= 1) { + if (al_init_rx_cpu_rmap(adapter)) + dev_warn(&adapter->pdev->dev, + "failed to map irqs to cpus\n"); + } + + adapter->msix_vecs = msix_vecs; + adapter->flags |= AL_ETH_FLAG_MSIX_ENABLED; +} + +static int al_eth_setup_int_mode(struct al_eth_adapter *adapter, int dis_msi) +{ + int i; + unsigned int cpu; + + if (!dis_msi) + al_eth_enable_msix(adapter); + + adapter->irq_vecs = max(1, adapter->msix_vecs); + + /* single INTX mode */ + if (adapter->msix_vecs == 0) { + snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, + AL_ETH_IRQNAME_SIZE, "al-eth-intx-all@pci:%s", + pci_name(adapter->pdev)); + adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = + al_eth_intr_intx_all; + adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = + adapter->pdev->irq; + adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; + cpu = cpumask_first(cpu_online_mask); + cpumask_set_cpu(cpu, &adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX] + .affinity_hint_mask); + + return 0; + } + + /* single MSI-X mode */ + if (adapter->msix_vecs == 1) { + snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, + AL_ETH_IRQNAME_SIZE, "al-eth-msix-all@pci:%s", + pci_name(adapter->pdev)); + adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = + al_eth_intr_msix_all; + adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = + adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector; + adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; + + cpu = cpumask_first(cpu_online_mask); + cpumask_set_cpu(cpu, &adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX] + .affinity_hint_mask); + + return 0; + } + /* MSI-X per queue*/ + snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, + AL_ETH_IRQNAME_SIZE, "al-eth-msix-mgmt@pci:%s", + pci_name(adapter->pdev)); + adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_mgmt; + + adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; + adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = + adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector; + cpu = cpumask_first(cpu_online_mask); + cpumask_set_cpu( + cpu, &adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].affinity_hint_mask); + + for (i = 0; i < adapter->num_rx_queues; i++) { + int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i); + int napi_idx = AL_ETH_RXQ_NAPI_IDX(adapter, i); + + snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE, + "al-eth-rx-comp-%d@pci:%s", i, + pci_name(adapter->pdev)); + adapter->irq_tbl[irq_idx].handler = al_eth_intr_msix_rx; + adapter->irq_tbl[irq_idx].data = &adapter->al_napi[napi_idx]; + adapter->irq_tbl[irq_idx].vector = + adapter->msix_entries[irq_idx].vector; + + cpu = 
cpumask_next((i % num_online_cpus() - 1),
+				   cpu_online_mask);
+		cpumask_set_cpu(cpu,
+				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
+		int napi_idx = AL_ETH_TXQ_NAPI_IDX(adapter, i);
+
+		snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE,
+			 "al-eth-tx-comp-%d@pci:%s", i,
+			 pci_name(adapter->pdev));
+		adapter->irq_tbl[irq_idx].handler = al_eth_intr_msix_tx;
+		adapter->irq_tbl[irq_idx].data = &adapter->al_napi[napi_idx];
+		adapter->irq_tbl[irq_idx].vector =
+			adapter->msix_entries[irq_idx].vector;
+
+		cpu = cpumask_next((i % num_online_cpus() - 1),
+				   cpu_online_mask);
+		cpumask_set_cpu(cpu,
+				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
+	}
+
+	return 0;
+}
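+
+/*
+ * Note: only two interrupt layouts are actually wired up here -- a single
+ * shared INTx vector that demultiplexes the group B (rx) and group C (tx)
+ * cause registers in software, or one MSI-X vector per queue plus a
+ * management vector; al_eth_configure_int_mode() below rejects the
+ * single-MSI-X case.
+ */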
+
+static int al_eth_configure_int_mode(struct al_eth_adapter *adapter)
+{
+	enum al_iofic_mode int_mode;
+	uint32_t m2s_errors_disable = 0x480;
+	uint32_t m2s_aborts_disable = 0x480;
+	uint32_t s2m_errors_disable = 0x1E0;
+	uint32_t s2m_aborts_disable = 0x1E0;
+
+	/* single INTX mode */
+	if (adapter->msix_vecs == 0)
+		int_mode = AL_IOFIC_MODE_LEGACY;
+	else if (adapter->msix_vecs > 1)
+		int_mode = AL_IOFIC_MODE_MSIX_PER_Q;
+	else {
+		netdev_err(adapter->netdev,
+			   "udma doesn't support single MSI-X mode yet.\n");
+		return -EIO;
+	}
+
+	if (adapter->board_type != ALPINE_INTEGRATED) {
+		m2s_errors_disable |= 0x3f << 25;
+		m2s_aborts_disable |= 0x3f << 25;
+		s2m_errors_disable |= 0x3f << 25;
+		s2m_aborts_disable |= 0x3f << 25;
+	}
+
+	if (al_udma_iofic_config((struct unit_regs __iomem *)adapter->udma_base,
+				 int_mode, m2s_errors_disable,
+				 m2s_aborts_disable, s2m_errors_disable,
+				 s2m_aborts_disable)) {
+		netdev_err(adapter->netdev,
+			   "al_udma_unit_int_config failed!\n");
+		return -EIO;
+	}
+	adapter->int_mode = int_mode;
+	netdev_info(adapter->netdev, "using %s interrupt mode\n",
+		    int_mode == AL_IOFIC_MODE_LEGACY ? "INTx" :
+		    int_mode == AL_IOFIC_MODE_MSIX_PER_Q ? "MSI-X per Queue" :
+							   "Unknown");
+	/* set interrupt moderation resolution to 15us */
+	al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))
+					   ->gen.interrupt_regs.main_iofic,
+				  AL_INT_GROUP_B, 15);
+	al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))
+					   ->gen.interrupt_regs.main_iofic,
+				  AL_INT_GROUP_C, 15);
+
+	return 0;
+}
+
+static int al_eth_request_irq(struct al_eth_adapter *adapter)
+{
+	unsigned long flags;
+	struct al_eth_irq *irq;
+	int rc = 0, i;
+
+	if (adapter->flags & AL_ETH_FLAG_MSIX_ENABLED)
+		flags = 0;
+	else
+		flags = IRQF_SHARED;
+
+	for (i = 0; i < adapter->irq_vecs; i++) {
+		irq = &adapter->irq_tbl[i];
+		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+				 irq->data);
+		if (rc) {
+			netdev_err(adapter->netdev,
+				   "failed to request irq %d. index %d rc %d\n",
+				   irq->vector, i, rc);
+			break;
+		}
+		irq->requested = 1;
+
+		netdev_dbg(adapter->netdev,
+			   "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
+			   i, irq->affinity_hint_mask.bits[0], irq->vector);
+
+		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
+	}
+	return rc;
+}
+
+static void __al_eth_free_irq(struct al_eth_adapter *adapter)
+{
+	struct al_eth_irq *irq;
+	int i;
+
+	for (i = 0; i < adapter->irq_vecs; i++) {
+		irq = &adapter->irq_tbl[i];
+		if (irq->requested) {
+			irq_set_affinity_hint(irq->vector, NULL);
+			free_irq(irq->vector, irq->data);
+		}
+		irq->requested = 0;
+	}
+}
+
+static void al_eth_free_irq(struct al_eth_adapter *adapter)
+{
+#ifdef CONFIG_RFS_ACCEL
+	if (adapter->msix_vecs >= 1) {
+		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
+		adapter->netdev->rx_cpu_rmap = NULL;
+	}
+#endif
+
+	__al_eth_free_irq(adapter);
+	if (adapter->flags & AL_ETH_FLAG_MSIX_ENABLED)
+		pci_disable_msix(adapter->pdev);
+
+	adapter->flags &= ~AL_ETH_FLAG_MSIX_ENABLED;
+
+	kfree(adapter->msix_entries);
+	adapter->msix_entries = NULL;
+}
+
+static void al_eth_interrupts_mask(struct al_eth_adapter *adapter);
+
+static void al_eth_disable_int_sync(struct al_eth_adapter *adapter)
+{
+	int i;
+
+	if (!netif_running(adapter->netdev))
+		return;
+
+	/* disable forwarding interrupts from eth through pci end point */
+	if ((adapter->board_type == ALPINE_FPGA_NIC) ||
+	    (adapter->board_type == ALPINE_NIC))
+		writel(0, adapter->internal_pcie_base + 0x1800000 + 0x1210);
+
+	/* mask hw interrupts */
+	al_eth_interrupts_mask(adapter);
+
+	for (i = 0; i < adapter->irq_vecs; i++)
+		synchronize_irq(adapter->irq_tbl[i].vector);
+}
+
+static void al_eth_interrupts_unmask(struct al_eth_adapter *adapter)
+{
+	u32 group_a_mask =
+		AL_INT_GROUP_A_GROUP_D_SUM; /* enable group D summary */
+	u32 group_b_mask = (1 << adapter->num_rx_queues) - 1; /* bit per Rx q */
+	u32 group_c_mask = (1 << adapter->num_tx_queues) - 1; /* bit per Tx q */
+	u32 group_d_mask = 3 << 8;
+	struct unit_regs __iomem *regs_base =
+		(struct unit_regs __iomem *)adapter->udma_base;
+
+	if (adapter->int_mode == AL_IOFIC_MODE_LEGACY)
+		group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM |
+				AL_INT_GROUP_A_GROUP_C_SUM |
+				AL_INT_GROUP_A_GROUP_D_SUM;
+
+	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
+			     AL_INT_GROUP_A, group_a_mask);
+	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
+			     AL_INT_GROUP_B, group_b_mask);
+	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
+			     AL_INT_GROUP_C, group_c_mask);
+	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
+			     AL_INT_GROUP_D, group_d_mask);
+}
+
+static void al_eth_interrupts_mask(struct al_eth_adapter *adapter)
+{
+	struct unit_regs __iomem *regs_base =
+		(struct unit_regs __iomem *)adapter->udma_base;
+
+	/* mask all interrupts */
+	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
+			   AL_INT_GROUP_A, 0x7);
+	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
+			   AL_INT_GROUP_B, 0xF);
+	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
+			   AL_INT_GROUP_C, 0xF);
+	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
+			   AL_INT_GROUP_D, 0xFFFFFFFF);
+}
+
+static int al_init_rx_cpu_rmap(struct al_eth_adapter *adapter)
+{
+#ifdef CONFIG_RFS_ACCEL
+	unsigned int i;
+	int rc;
+
+	adapter->netdev->rx_cpu_rmap =
+		alloc_irq_cpu_rmap(adapter->num_rx_queues);
+	if (!adapter->netdev->rx_cpu_rmap)
+		return -ENOMEM;
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
+
+		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
+				      adapter->msix_entries[irq_idx].vector);
+		if (rc) {
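			/* tear down the whole rmap on any failure; accelerated
			 * RFS is best-effort and simply stays disabled
			 */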
free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); + adapter->netdev->rx_cpu_rmap = NULL; + return rc; + } + } +#endif + return 0; +} + +static void al_eth_del_napi(struct al_eth_adapter *adapter) +{ + int i; + int napi_num = adapter->num_rx_queues + adapter->num_tx_queues; + + for (i = 0; i < napi_num; i++) + netif_napi_del(&adapter->al_napi[i].napi); +} + +static void al_eth_init_napi(struct al_eth_adapter *adapter) +{ + int i; + int napi_num = adapter->num_rx_queues + adapter->num_tx_queues; + + for (i = 0; i < napi_num; i++) { + struct al_eth_napi *napi = &adapter->al_napi[i]; + int (*poll)(struct napi_struct *, int); + + if (i < adapter->num_rx_queues) { + poll = al_eth_rx_poll; + napi->qid = i; + netif_napi_add(adapter->netdev, + &adapter->al_napi[i].napi, poll); + } else { + poll = al_eth_tx_poll; + napi->qid = i - adapter->num_rx_queues; + netif_napi_add(adapter->netdev, + &adapter->al_napi[i].napi, poll); + } + napi->adapter = adapter; + } +} + +static void al_eth_napi_disable_all(struct al_eth_adapter *adapter) +{ + int i; + int napi_num = adapter->num_rx_queues + adapter->num_tx_queues; + + for (i = 0; i < napi_num; i++) + napi_disable(&adapter->al_napi[i].napi); +} + +static void al_eth_napi_enable_all(struct al_eth_adapter *adapter) + +{ + int i; + int napi_num = adapter->num_rx_queues + adapter->num_tx_queues; + + for (i = 0; i < napi_num; i++) + napi_enable(&adapter->al_napi[i].napi); +} + +/* init FSM, no tunneling supported yet, if packet is tcp/udp over ipv4/ipv6, use 4 tuple hash */ +static void al_eth_fsm_table_init(struct al_eth_adapter *adapter) +{ + uint32_t val; + int i; + + for (i = 0; i < AL_ETH_RX_FSM_TABLE_SIZE; i++) { + uint8_t outer_type = AL_ETH_FSM_ENTRY_OUTER(i); + switch (outer_type) { + case AL_ETH_FSM_ENTRY_IPV4_TCP: + case AL_ETH_FSM_ENTRY_IPV4_UDP: + case AL_ETH_FSM_ENTRY_IPV6_TCP: + case AL_ETH_FSM_ENTRY_IPV6_UDP: + val = AL_ETH_FSM_DATA_OUTER_4_TUPLE | + AL_ETH_FSM_DATA_HASH_SEL; + break; + case AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP: + case AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP: + val = AL_ETH_FSM_DATA_OUTER_2_TUPLE | + AL_ETH_FSM_DATA_HASH_SEL; + break; + case AL_ETH_FSM_ENTRY_NOT_IP: + if (AL_ETH_FSM_ENTRY_TUNNELED(i) && + !AL_ETH_FSM_ENTRY_INNER(i)) /*PPPoE*/ + val = AL_ETH_FSM_DATA_INNER_4_TUPLE | + AL_ETH_FSM_DATA_HASH_SEL; + else + val = (0 << AL_ETH_FSM_DATA_DEFAULT_Q_SHIFT | + ((1 << 0) + << AL_ETH_FSM_DATA_DEFAULT_UDMA_SHIFT)); + break; + default: + val = (0 << AL_ETH_FSM_DATA_DEFAULT_Q_SHIFT | + ((1 << 0) + << AL_ETH_FSM_DATA_DEFAULT_UDMA_SHIFT)); + } + al_eth_fsm_table_set(&adapter->hal_adapter, i, val); + } +} + +/* the following defines should be moved to hal */ +#define AL_ETH_CTRL_TABLE_PRIO_SEL_SHIFT 0 +#define AL_ETH_CTRL_TABLE_PRIO_SEL_MASK \ + (0xF << AL_ETH_CTRL_TABLE_PRIO_SEL_SHIFT) +#define AL_ETH_CTRL_TABLE_PRIO_SEL_0 (12 << AL_ETH_CTRL_TABLE_PRIO_SEL_SHIFT) + +#define AL_ETH_CTRL_TABLE_Q_SEL_SHIFT 4 +#define AL_ETH_CTRL_TABLE_Q_SEL_MASK (0xF << AL_ETH_CTRL_TABLE_Q_SEL_SHIFT) +#define AL_ETH_CTRL_TABLE_Q_SEL_THASH (1 << AL_ETH_CTRL_TABLE_Q_SEL_SHIFT) + +#define AL_ETH_CTRL_TABLE_Q_PRIO_SEL_SHIFT 8 +#define AL_ETH_CTRL_TABLE_Q_PRIO_SEL_MASK \ + (0x3 << AL_ETH_CTRL_TABLE_Q_PRIO_SEL_SHIFT) +/* selected queue is hash output table */ +#define AL_ETH_CTRL_TABLE_Q_PRIO_SEL_Q (3 << AL_ETH_CTRL_TABLE_Q_PRIO_SEL_SHIFT) + +#define AL_ETH_CTRL_TABLE_UDMA_SEL_SHIFT 10 +#define AL_ETH_CTRL_TABLE_UDMA_SEL_MASK \ + (0xF << AL_ETH_CTRL_TABLE_UDMA_SEL_SHIFT) +/* select UDMA from rfw_default opt1 register */ +#define AL_ETH_CTRL_TABLE_UDMA_SEL_DEF_1 (7 << 
AL_ETH_CTRL_TABLE_UDMA_SEL_SHIFT) +#define AL_ETH_CTRL_TABLE_UDMA_SEL_0 (15 << AL_ETH_CTRL_TABLE_UDMA_SEL_SHIFT) + +#define AL_ETH_CTRL_TABLE_UDMA_SEL_MASK_INPUT (1 << 14) + +#define AL_ETH_CTRL_TABLE_USE_TABLE (1 << 20) + +#define AL_ETH_MAC_TABLE_UNICAST_IDX_BASE 0 +#define AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT 4 +#define AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX \ + (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) + +#define AL_ETH_MAC_TABLE_DROP_IDX (AL_ETH_FWD_MAC_NUM - 1) +#define AL_ETH_MAC_TABLE_BROADCAST_IDX (AL_ETH_MAC_TABLE_DROP_IDX - 1) + +#define MAC_ADDR_STR "%02x:%02x:%02x:%02x:%02x:%02x" +#define MAC_ADDR(addr) addr[0], addr[1], addr[2], addr[3], addr[4], addr[5] + +static void al_eth_mac_table_unicast_add(struct al_eth_adapter *adapter, + uint8_t idx, uint8_t *addr, + uint8_t udma_mask) +{ + struct al_eth_fwd_mac_table_entry entry = { { 0 } }; + + memcpy(entry.addr, adapter->mac_addr, sizeof(adapter->mac_addr)); + + memset(entry.mask, 0xff, sizeof(entry.mask)); + entry.rx_valid = true; + entry.tx_valid = false; + entry.udma_mask = udma_mask; + entry.filter = false; + + netdev_dbg(adapter->netdev, + "%s: [%d]: addr " MAC_ADDR_STR " mask " MAC_ADDR_STR "\n", + __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask)); + + al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); +} + +static void al_eth_mac_table_all_multicast_add(struct al_eth_adapter *adapter, + uint8_t idx, uint8_t udma_mask) +{ + struct al_eth_fwd_mac_table_entry entry = { { 0 } }; + + memset(entry.addr, 0x00, sizeof(entry.addr)); + memset(entry.mask, 0x00, sizeof(entry.mask)); + entry.mask[0] |= BIT(0); + entry.addr[0] |= BIT(0); + + entry.rx_valid = true; + entry.tx_valid = false; + entry.udma_mask = udma_mask; + entry.filter = false; + + netdev_dbg(adapter->netdev, + "%s: [%d]: addr " MAC_ADDR_STR " mask " MAC_ADDR_STR "\n", + __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask)); + + al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); +} + +static void al_eth_mac_table_broadcast_add(struct al_eth_adapter *adapter, + uint8_t idx, uint8_t udma_mask) +{ + struct al_eth_fwd_mac_table_entry entry = { { 0 } }; + + memset(entry.addr, 0xff, sizeof(entry.addr)); + memset(entry.mask, 0xff, sizeof(entry.mask)); + + entry.rx_valid = true; + entry.tx_valid = false; + entry.udma_mask = udma_mask; + entry.filter = false; + + netdev_dbg(adapter->netdev, + "%s: [%d]: addr " MAC_ADDR_STR " mask " MAC_ADDR_STR "\n", + __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask)); + + al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); +} + +static void al_eth_mac_table_promiscuous_set(struct al_eth_adapter *adapter, + al_bool promiscuous) +{ + struct al_eth_fwd_mac_table_entry entry = { { 0 } }; + + memset(entry.addr, 0x00, sizeof(entry.addr)); + memset(entry.mask, 0x00, sizeof(entry.mask)); + + entry.rx_valid = true; + entry.tx_valid = false; + entry.udma_mask = (promiscuous) ? 1 : 0; + entry.filter = (promiscuous) ? false : true; + + netdev_dbg(adapter->netdev, "%s: %s promiscuous mode\n", __func__, + (promiscuous) ? "enter" : "exit"); + + al_eth_fwd_mac_table_set(&adapter->hal_adapter, + AL_ETH_MAC_TABLE_DROP_IDX, &entry); +} + +static void al_eth_mac_table_entry_clear(struct al_eth_adapter *adapter, + uint8_t idx) +{ + struct al_eth_fwd_mac_table_entry entry = { { 0 } }; + + netdev_dbg(adapter->netdev, "%s: clear entry %d\n", __func__, idx); + + al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); +} + +/* configure the RX forwarding (UDMA/QUEUE.. 
selection) + * currently we don't use the full control table, we use only the default configuration + */ + +static void al_eth_config_rx_fwd(struct al_eth_adapter *adapter) +{ + struct al_eth_fwd_ctrl_table_entry entry; + int i; + + /* let priority be equal to pbits */ + for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++) + al_eth_fwd_pbits_table_set(&adapter->hal_adapter, i, i); + + /* map priority to queue index, queue id = priority/2 */ + for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++) + al_eth_fwd_priority_table_set(&adapter->hal_adapter, i, i >> 1); + + entry.prio_sel = AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0; + entry.queue_sel_1 = AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE; + entry.queue_sel_2 = AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO; + entry.udma_sel = AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE; + entry.filter = AL_FALSE; + + al_eth_ctrl_table_def_set(&adapter->hal_adapter, AL_FALSE, &entry); + + /* + * By default set the mac table to forward all unicast packets to our + * MAC address and all broadcast. all the rest will be dropped. + */ + al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE, + adapter->mac_addr, 1); + al_eth_mac_table_broadcast_add(adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, + 1); + al_eth_mac_table_promiscuous_set(adapter, false); + + /* set toeplitz hash keys */ + get_random_bytes(adapter->toeplitz_hash_key, + sizeof(adapter->toeplitz_hash_key)); + + for (i = 0; i < AL_ETH_RX_HASH_KEY_NUM; i++) + al_eth_hash_key_set(&adapter->hal_adapter, i, + htonl(adapter->toeplitz_hash_key[i])); + + for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) + al_eth_thash_table_set(&adapter->hal_adapter, i, 0, + adapter->rss_ind_tbl[i]); + + al_eth_fsm_table_init(adapter); +} + +static void al_eth_set_coalesce(struct al_eth_adapter *adapter, + unsigned int tx_usecs, unsigned int rx_usecs); + +static void al_eth_restore_ethtool_params(struct al_eth_adapter *adapter) +{ + int i; + unsigned int tx_usecs = adapter->tx_usecs; + unsigned int rx_usecs = adapter->rx_usecs; + + adapter->tx_usecs = 0; + adapter->rx_usecs = 0; + + al_eth_set_coalesce(adapter, tx_usecs, rx_usecs); + + for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) + al_eth_thash_table_set(&adapter->hal_adapter, i, 0, + adapter->rss_ind_tbl[i]); +} + +static void al_eth_up_complete(struct al_eth_adapter *adapter) +{ + al_eth_configure_int_mode(adapter); + + /*config rx fwd*/ + al_eth_config_rx_fwd(adapter); + + al_eth_init_napi(adapter); + al_eth_napi_enable_all(adapter); + + al_eth_change_mtu(adapter->netdev, adapter->netdev->mtu); + /* enable hw queues */ + al_eth_udma_queues_enable_all(adapter); + + al_eth_refill_all_rx_bufs(adapter); + + al_eth_interrupts_unmask(adapter); + + /* enable forwarding interrupts from eth through pci end point*/ + if ((adapter->board_type == ALPINE_FPGA_NIC) || + (adapter->board_type == ALPINE_NIC)) + writel(0x1FFFF, + adapter->internal_pcie_base + 0x1800000 + 0x1210); + + /* enable transmits */ + netif_tx_start_all_queues(adapter->netdev); + + /* enable flow control */ + al_eth_flow_ctrl_enable(adapter); + + al_eth_restore_ethtool_params(adapter); + + /* enable the mac tx and rx paths */ + al_eth_mac_start(&adapter->hal_adapter); +} + +static int al_eth_up(struct al_eth_adapter *adapter) +{ + int rc; + + netdev_info(adapter->netdev, "%s\n", __func__); + + rc = al_eth_hw_init(adapter); + if (rc) + goto err_hw_init_open; + + rc = al_eth_setup_int_mode(adapter, disable_msi); + if (rc) { + dev_err(&adapter->pdev->dev, + "%s failed at setup interrupt mode!\n", __func__); + goto err_setup_int; + } + + /* 
allocate transmit descriptors */
+	rc = al_eth_setup_all_tx_resources(adapter);
+	if (rc)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	rc = al_eth_setup_all_rx_resources(adapter);
+	if (rc)
+		goto err_setup_rx;
+
+	rc = al_eth_request_irq(adapter);
+	if (rc)
+		goto err_req_irq;
+
+	al_eth_up_complete(adapter);
+
+	adapter->up = true;
+
+	return rc;
+
+err_req_irq:
+	al_eth_free_all_rx_resources(adapter);
+err_setup_rx:
+	al_eth_free_all_tx_resources(adapter);
+err_setup_tx:
+	al_eth_free_irq(adapter);
+err_setup_int:
+	al_eth_hw_stop(adapter);
+err_hw_init_open:
+	al_eth_function_reset(adapter);
+
+	return rc;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int al_eth_flow_steer(struct net_device *netdev,
+			     const struct sk_buff *skb, u16 rxq_index,
+			     u32 flow_id)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+	int rc = 0;
+
+	if ((skb->protocol != htons(ETH_P_IP)) &&
+	    (skb->protocol != htons(ETH_P_IPV6)))
+		return -EPROTONOSUPPORT;
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		if (ip_is_fragment(ip_hdr(skb)))
+			return -EPROTONOSUPPORT;
+		if ((ip_hdr(skb)->protocol != IPPROTO_TCP) &&
+		    (ip_hdr(skb)->protocol != IPPROTO_UDP))
+			return -EPROTONOSUPPORT;
+	}
+
+	if (skb->protocol == htons(ETH_P_IPV6)) {
+		/* ipv6 with extension not supported yet */
+		if ((ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) &&
+		    (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP))
+			return -EPROTONOSUPPORT;
+	}
+	rc = flow_id & (AL_ETH_RX_THASH_TABLE_SIZE - 1);
+
+	adapter->rss_ind_tbl[rc] = rxq_index;
+	al_eth_thash_table_set(&adapter->hal_adapter, rc, 0, rxq_index);
+	if (skb->protocol == htons(ETH_P_IP)) {
+		int nhoff = skb_network_offset(skb);
+		const struct iphdr *ip =
+			(const struct iphdr *)(skb->data + nhoff);
+		const __be16 *ports =
+			(const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+		netdev_info(
+			adapter->netdev,
+			"steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+			(ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
+			&ip->saddr, ntohs(ports[0]), &ip->daddr,
+			ntohs(ports[1]), rxq_index, flow_id, rc);
+	} else {
+		struct ipv6hdr *ip6h = ipv6_hdr(skb);
+		const __be16 *ports = (const __be16 *)skb_transport_header(skb);
+
+		netdev_info(
+			adapter->netdev,
+			"steering %s %pI6c:%u:%pI6c:%u to queue %u [flow %u filter %d]\n",
+			(ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) ? "TCP" : "UDP",
+			&ip6h->saddr, ntohs(ports[0]), &ip6h->daddr,
+			ntohs(ports[1]), rxq_index, flow_id, rc);
+	}
+
+	return rc;
+}
+#endif
+
+static int al_set_features(struct net_device *dev, netdev_features_t features)
+{
+#if defined(NETIF_F_MQ_TX_LOCK_OPT)
+	if (((features ^ dev->features) & NETIF_F_MQ_TX_LOCK_OPT) &&
+	    netif_running(dev)) {
+		netdev_warn(
+			dev,
+			"Can't toggle NETIF_F_MQ_TX_LOCK_OPT: device is running!\n");
+		return -EINVAL;
+	}
+#endif
+	return 0;
+}
+
+/************************ Link management ************************/
+#define SFP_I2C_ADDR 0x50
+
+static int al_eth_i2c_byte_read(void *context, uint8_t bus_id, uint8_t i2c_addr,
+				uint8_t reg_addr, uint8_t *val)
+{
+	struct i2c_adapter *i2c_adapter;
+	struct al_eth_adapter *adapter = context;
+
+	struct i2c_msg msgs[] = { {
+					  .addr = i2c_addr,
+					  .flags = 0,
+					  .len = 1,
+					  .buf = &reg_addr,
+				  },
+				  {
+					  .addr = i2c_addr,
+					  .flags = I2C_M_RD,
+					  .len = 1,
+					  .buf = val,
+				  } };
+
+	i2c_adapter = i2c_get_adapter(bus_id);
+
+	if (i2c_adapter == NULL) {
+		netdev_err(
+			adapter->netdev,
+			"Failed to get i2c adapter. "
+			"probably caused by wrong i2c bus id in the device tree, "
+			"wrong i2c mux implementation, or the port is configured wrongly as SFP+\n");
+		return -EINVAL;
+	}
+
+	if (i2c_transfer(i2c_adapter, msgs, 2) != 2) {
+		i2c_put_adapter(i2c_adapter);
+		netdev_dbg(adapter->netdev, "Failed to read sfp+ parameters\n");
+		return -ETIMEDOUT;
+	}
+
+	/* i2c_get_adapter() takes a reference; drop it once done */
+	i2c_put_adapter(i2c_adapter);
+	return 0;
+}
+
+static int al_eth_i2c_byte_write(void *context, uint8_t bus_id,
+				 uint8_t i2c_addr, uint8_t reg_addr,
+				 uint8_t val)
+{
+	struct i2c_adapter *i2c_adapter;
+	struct al_eth_adapter *adapter = context;
+
+	struct i2c_msg msgs[] = { {
+					  .addr = i2c_addr,
+					  .flags = 0,
+					  .len = 1,
+					  .buf = &reg_addr,
+				  },
+				  {
+					  .addr = i2c_addr,
+					  .flags = 0,
+					  .len = 1,
+					  .buf = &val,
+				  } };
+
+	i2c_adapter = i2c_get_adapter(bus_id);
+
+	if (i2c_adapter == NULL) {
+		netdev_err(
+			adapter->netdev,
+			"Failed to get i2c adapter. "
+			"probably caused by wrong i2c bus id in the device tree, "
+			"wrong i2c mux implementation, or the port is configured wrongly as SFP+\n");
+		return -EINVAL;
+	}
+
+	if (i2c_transfer(i2c_adapter, msgs, 2) != 2) {
+		i2c_put_adapter(i2c_adapter);
+		netdev_dbg(adapter->netdev, "Failed to write sfp+ parameters\n");
+		return -ETIMEDOUT;
+	}
+
+	/* i2c_get_adapter() takes a reference; drop it once done */
+	i2c_put_adapter(i2c_adapter);
+	return 0;
+}
+
+static uint8_t al_eth_get_rand_byte(void)
+{
+	uint8_t byte;
+	get_random_bytes(&byte, 1);
+	return byte;
+}
+
+static void al_eth_serdes_mode_set(struct al_eth_adapter *adapter)
+{
+#ifdef CONFIG_ARCH_ALPINE
+	enum alpine_serdes_eth_mode mode =
+		(adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) ?
+			ALPINE_SERDES_ETH_MODE_SGMII :
+			ALPINE_SERDES_ETH_MODE_KR;
+
+	if ((adapter->mac_mode != AL_ETH_MAC_MODE_SGMII) &&
+	    (adapter->mac_mode != AL_ETH_MAC_MODE_10GbE_Serial)) {
+		netdev_err(adapter->netdev, "%s: mac_mode not supported\n",
+			   __func__);
+		return;
+	}
+
+	if (alpine_serdes_eth_mode_set(adapter->serdes_grp, mode))
+		netdev_err(adapter->netdev,
+			   "%s: alpine_serdes_eth_mode_set(%d, %d) failed!\n",
+			   __func__, adapter->serdes_grp, mode);
+
+	al_udelay(1000);
+#endif
+}
+
+static void al_eth_lm_mode_apply(struct al_eth_adapter *adapter,
+				 enum al_eth_lm_link_mode new_mode)
+{
+	enum al_eth_mac_mode last_mac_mode = adapter->mac_mode;
+
+	if (new_mode == AL_ETH_LM_MODE_DISCONNECTED)
+		return;
+
+	if (new_mode == AL_ETH_LM_MODE_1G) {
+		adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
+		adapter->link_config.active_speed = SPEED_1000;
+	} else {
+		adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
+		adapter->link_config.active_speed = SPEED_10000;
+	}
+
+	adapter->link_config.active_duplex = DUPLEX_FULL;
+
+	if ((adapter->auto_speed) && (last_mac_mode != adapter->mac_mode))
+		al_eth_serdes_mode_set(adapter);
+}
+
+static void al_eth_serdes_init(struct al_eth_adapter *adapter)
+{
+#ifdef CONFIG_ARCH_ALPINE
+	void __iomem *serdes_base;
+
+	adapter->serdes_init = false;
+
+	/*
+	 * always call with group A to get the base address of
+	 * all groups.
+ */ + serdes_base = alpine_serdes_resource_get(AL_SRDS_GRP_A); + + if (!serdes_base) { + netdev_err(adapter->netdev, "serdes_base get failed!\n"); + return; + } + + al_serdes_handle_init(serdes_base, &adapter->serdes_obj); + + adapter->serdes_init = true; +#endif +} + +static void al_eth_down(struct al_eth_adapter *adapter) +{ + netdev_info(adapter->netdev, "%s\n", __func__); + + BUG_ON(!adapter->up); + + adapter->up = false; + + netif_carrier_off(adapter->netdev); + al_eth_disable_int_sync(adapter); + al_eth_napi_disable_all(adapter); + netif_tx_disable(adapter->netdev); + al_eth_free_irq(adapter); + al_eth_hw_stop(adapter); + al_eth_del_napi(adapter); + + al_eth_free_all_tx_bufs(adapter); + al_eth_free_all_rx_bufs(adapter); + al_eth_free_all_tx_resources(adapter); + al_eth_free_all_rx_resources(adapter); +} + +static void al_eth_link_status_task(struct work_struct *work) +{ + struct al_eth_adapter *adapter = container_of( + to_delayed_work(work), struct al_eth_adapter, link_status_task); + enum al_eth_lm_link_mode old_mode; + enum al_eth_lm_link_mode new_mode; + al_bool fault; + al_bool link_up; + int rc; + + rc = al_eth_lm_link_detection(&adapter->lm_context, &fault, &old_mode, + &new_mode); + if (rc) + goto reschedule; + + /* The link is still up. continue */ + if (fault == false) + goto reschedule; + + if (new_mode == old_mode) { + if (new_mode == AL_ETH_LM_MODE_DISCONNECTED) + goto reschedule; + } else { + if (old_mode != AL_ETH_LM_MODE_DISCONNECTED) { + netdev_info(adapter->netdev, "%s link down\n", + __func__); + adapter->last_link = false; + al_eth_down(adapter); + } + + al_eth_lm_mode_apply(adapter, new_mode); + + if (new_mode != AL_ETH_LM_MODE_DISCONNECTED) { + if (al_eth_up(adapter)) + goto reschedule; + } else { + goto reschedule; + } + } + +#ifdef CONFIG_ARCH_ALPINE + alpine_serdes_eth_group_lock(adapter->serdes_grp); +#endif + rc = al_eth_lm_link_establish(&adapter->lm_context, &link_up); +#ifdef CONFIG_ARCH_ALPINE + alpine_serdes_eth_group_unlock(adapter->serdes_grp); +#endif + + if ((rc == 0) && (link_up == true)) { + netdev_info(adapter->netdev, "%s link up\n", __func__); + adapter->last_establish_failed = false; + + netif_carrier_on(adapter->netdev); + } else { + if (adapter->last_link != link_up) { + netdev_info(adapter->netdev, "%s link down\n", + __func__); + adapter->last_establish_failed = false; + } else if ((rc != 0) && (!adapter->last_establish_failed)) { + netdev_info(adapter->netdev, + "%s failed to establish link\n", __func__); + adapter->last_establish_failed = true; + } + + if (adapter->last_link == true) + netif_carrier_off(adapter->netdev); + } + + adapter->last_link = link_up; + +reschedule: + /* setting link status delay to 0 (through sysfs) will stop the task */ + if (adapter->link_poll_interval != 0) { + unsigned long delay; + + delay = msecs_to_jiffies(adapter->link_poll_interval); + + schedule_delayed_work(&adapter->link_status_task, delay); + } +} + +static void al_eth_lm_config(struct al_eth_adapter *adapter) +{ + struct al_eth_lm_init_params params; + + params.adapter = &adapter->hal_adapter; + params.serdes_obj = &adapter->serdes_obj; + params.grp = adapter->serdes_grp; + params.lane = adapter->serdes_lane; + params.sfp_detection = adapter->sfp_detection_needed; + if (adapter->sfp_detection_needed) { + params.sfp_bus_id = adapter->i2c_adapter_id; + params.sfp_i2c_addr = SFP_I2C_ADDR; + } + + if (adapter->sfp_detection_needed == false) { + switch (adapter->mac_mode) { + case AL_ETH_MAC_MODE_10GbE_Serial: + if (adapter->lt_en && adapter->an_en) + 
params.default_mode = AL_ETH_LM_MODE_10G_DA;
+			else
+				params.default_mode = AL_ETH_LM_MODE_10G_OPTIC;
+			break;
+		case AL_ETH_MAC_MODE_SGMII:
+		case AL_ETH_MAC_MODE_SGMII_2_5G:
+			params.default_mode = AL_ETH_LM_MODE_1G;
+			break;
+		default:
+			netdev_err(adapter->netdev,
+				   "mac mode not supported!\n");
+			params.default_mode = AL_ETH_LM_MODE_10G_DA;
+		}
+	} else {
+		params.default_mode = AL_ETH_LM_MODE_10G_DA;
+	}
+
+	params.link_training = adapter->lt_en;
+	params.rx_equal = true;
+	params.static_values = !adapter->dont_override_serdes;
+	params.i2c_read = &al_eth_i2c_byte_read;
+	params.i2c_write = &al_eth_i2c_byte_write;
+	params.i2c_context = adapter;
+	params.get_random_byte = &al_eth_get_rand_byte;
+	params.kr_fec_enable = false;
+
+	params.retimer_exist = adapter->retimer.exist;
+	params.retimer_bus_id = adapter->retimer.bus_id;
+	params.retimer_i2c_addr = adapter->retimer.i2c_addr;
+	params.retimer_channel = adapter->retimer.channel;
+
+	al_eth_lm_init(&adapter->lm_context, &params);
+}
+
+#define AQUANTIA_AQR105_ID	0x3a1b4a2
+
+static int al_eth_aq_phy_fixup(struct phy_device *phydev)
+{
+	int temp = 0;
+
+	temp = phy_read(phydev, (MII_ADDR_C45 | (7 * 0x10000) | 0x20));
+	temp &= ~(1 << 12);
+
+	phy_write(phydev, (MII_ADDR_C45 | (7 * 0x10000) | 0x20), temp);
+
+	temp = phy_read(phydev, (MII_ADDR_C45 | (7 * 0x10000) | 0xc400));
+	temp |= ((1 << 15) | (1 << 11) | (1 << 10));
+	phy_write(phydev, (MII_ADDR_C45 | (7 * 0x10000) | 0xc400), temp);
+
+	temp = phy_read(phydev, (MII_ADDR_C45 | (7 * 0x10000) | 0));
+	temp |= (1 << 9);
+	temp &= ~(1 << 15);
+
+	phy_write(phydev, (MII_ADDR_C45 | (7 * 0x10000) | 0), temp);
+
+	return 0;
+}
+
+/**
+ * al_eth_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int al_eth_open(struct net_device *netdev)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+	int rc;
+	enum al_eth_lm_link_mode old_mode;
+	enum al_eth_lm_link_mode new_mode;
+
+	netdev_info(adapter->netdev, "%s\n", __func__);
+
+	netif_carrier_off(netdev);
+
+	/* Notify the stack of the actual queue counts.
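+	 * Both counts equal AL_ETH_NUM_QUEUES today, so this is arguably
+	 * redundant, but it keeps the stack's view correct should the TX
+	 * and RX queue counts ever diverge.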
*/
+	rc = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+	if (rc)
+		return rc;
+
+	rc = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+	if (rc)
+		return rc;
+
+	al_eth_serdes_init(adapter);
+
+	adapter->last_establish_failed = false;
+
+	if (adapter->phy_exist == false) {
+		netdev_info(adapter->netdev, "%s: no phy, use_lm %d\n",
+			    __func__, adapter->use_lm);
+
+		if (adapter->use_lm) {
+			al_eth_lm_config(adapter);
+
+			rc = al_eth_lm_link_detection(&adapter->lm_context,
+						      NULL, &old_mode,
+						      &new_mode);
+			if (rc)
+				return rc;
+
+			al_eth_lm_mode_apply(adapter, new_mode);
+
+			if (new_mode != AL_ETH_LM_MODE_DISCONNECTED) {
+				rc = al_eth_up(adapter);
+				if (rc)
+					return rc;
+			}
+		} else {
+			rc = al_eth_up(adapter);
+			if (rc)
+				return rc;
+		}
+	} else {
+		rc = al_eth_up(adapter);
+		if (rc)
+			return rc;
+
+		if (adapter->phy_fixup_needed) {
+			rc = phy_register_fixup_for_uid(AQUANTIA_AQR105_ID,
+							0xffffffff,
+							al_eth_aq_phy_fixup);
+			if (rc)
+				netdev_warn(adapter->netdev,
+					    "failed to register PHY fixup\n");
+		}
+
+		rc = al_eth_mdiobus_setup(adapter);
+		netdev_dbg(netdev, "mdiobus setup rc 0x%x\n", rc);
+		if (rc) {
+			netdev_err(netdev, "failed at mdiobus setup!\n");
+			goto err_mdiobus_setup;
+		}
+	}
+
+	if (adapter->mdio_bus) {
+		rc = al_eth_phy_init(adapter);
+	} else {
+		if ((adapter->board_type == ALPINE_INTEGRATED) &&
+		    (adapter->use_lm)) {
+			unsigned long delay;
+
+			delay = msecs_to_jiffies(
+				AL_ETH_FIRST_LINK_POLL_INTERVAL);
+
+			INIT_DELAYED_WORK(&adapter->link_status_task,
+					  al_eth_link_status_task);
+			schedule_delayed_work(&adapter->link_status_task,
+					      delay);
+		} else {
+			netif_carrier_on(adapter->netdev);
+		}
+	}
+
+	return rc;
+
+err_mdiobus_setup:
+	al_eth_down(adapter);
+
+	return rc;
+}
+
+/**
+ * al_eth_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
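+ *
+ * The delayed link-status work is cancelled synchronously first so it
+ * cannot race with the teardown of the rings it would otherwise poll.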
+ */
+static int al_eth_close(struct net_device *netdev)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+
+	netdev_dbg(adapter->netdev, "%s\n", __func__);
+
+	if ((adapter->board_type == ALPINE_INTEGRATED) && (adapter->use_lm)) {
+		cancel_delayed_work_sync(&adapter->link_status_task);
+	}
+
+	if (adapter->phydev) {
+		phy_stop(adapter->phydev);
+		phy_disconnect(adapter->phydev);
+		al_eth_mdiobus_teardown(adapter);
+	}
+
+	if (adapter->up)
+		al_eth_down(adapter);
+
+	/*al_eth_release_hw_control(adapter);*/
+
+	return 0;
+}
+
+static int al_eth_get_link_ksettings(struct net_device *netdev,
+				     struct ethtool_link_ksettings *ecmd)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+	struct phy_device *phydev = adapter->phydev;
+
+	if (phydev) {
+		phy_ethtool_ksettings_get(phydev, ecmd);
+		return 0;
+	}
+
+	ecmd->base.speed = adapter->link_config.active_speed;
+	ecmd->base.duplex = adapter->link_config.active_duplex;
+	ecmd->base.autoneg = adapter->link_config.autoneg;
+
+	return 0;
+}
+
+static int al_eth_set_link_ksettings(struct net_device *netdev,
+				     const struct ethtool_link_ksettings *ecmd)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+	struct phy_device *phydev = adapter->phydev;
+	int rc = 0;
+
+	if (phydev)
+		return phy_ethtool_ksettings_set(phydev, ecmd);
+
+	/* in case no phy exists, set only mac parameters */
+	adapter->link_config.active_speed = ecmd->base.speed;
+	adapter->link_config.active_duplex = ecmd->base.duplex;
+	adapter->link_config.autoneg = ecmd->base.autoneg;
+
+	if (adapter->up)
+		dev_warn(
+			&adapter->pdev->dev,
+			"%s this action will take place in the next activation (up)\n",
+			__func__);
+
+	return rc;
+}
+
+static int al_eth_get_coalesce(struct net_device *net_dev,
+			       struct ethtool_coalesce *coalesce,
+			       struct kernel_ethtool_coalesce *kernel_coal,
+			       struct netlink_ext_ack *extack)
+{
+	struct al_eth_adapter *adapter = netdev_priv(net_dev);
+
+	coalesce->tx_coalesce_usecs = adapter->tx_usecs;
+	coalesce->tx_coalesce_usecs_irq = adapter->tx_usecs;
+	coalesce->rx_coalesce_usecs = adapter->rx_usecs;
+	coalesce->rx_coalesce_usecs_irq = adapter->rx_usecs;
+	coalesce->use_adaptive_rx_coalesce = false;
+
+	return 0;
+}
+
+static void al_eth_set_coalesce(struct al_eth_adapter *adapter,
+				unsigned int tx_usecs, unsigned int rx_usecs)
+{
+	struct unit_regs *udma_base = (struct unit_regs *)(adapter->udma_base);
+
+	if (adapter->tx_usecs != tx_usecs) {
+		int qid;
+		uint interval = (tx_usecs + 15) / 16;
+
+		BUG_ON(interval > 255);
+		adapter->tx_usecs = interval * 16;
+		for (qid = 0; qid < adapter->num_tx_queues; qid++)
+			al_iofic_msix_moder_interval_config(
+				&udma_base->gen.interrupt_regs.main_iofic,
+				AL_INT_GROUP_C, qid, interval);
+	}
+	if (adapter->rx_usecs != rx_usecs) {
+		int qid;
+		uint interval = (rx_usecs + 15) / 16;
+
+		BUG_ON(interval > 255);
+		adapter->rx_usecs = interval * 16;
+		for (qid = 0; qid < adapter->num_rx_queues; qid++)
+			al_iofic_msix_moder_interval_config(
+				&udma_base->gen.interrupt_regs.main_iofic,
+				AL_INT_GROUP_B, qid, interval);
+	}
+}
+
+static int al_eth_ethtool_set_coalesce(struct net_device *net_dev,
+				       struct ethtool_coalesce *coalesce,
+				       struct kernel_ethtool_coalesce *kernel_coal,
+				       struct netlink_ext_ack *extack)
+{
+	struct al_eth_adapter *adapter = netdev_priv(net_dev);
+	unsigned int tx_usecs = adapter->tx_usecs;
+	unsigned int rx_usecs = adapter->rx_usecs;
+
+	if (coalesce->use_adaptive_tx_coalesce)
+		return -EINVAL;
+
+	if (coalesce->rx_coalesce_usecs != rx_usecs)
+		rx_usecs = coalesce->rx_coalesce_usecs;
+	else
+		rx_usecs = coalesce->rx_coalesce_usecs_irq;
+
+	if (coalesce->tx_coalesce_usecs != tx_usecs)
+		tx_usecs = coalesce->tx_coalesce_usecs;
+	else
+		tx_usecs = coalesce->tx_coalesce_usecs_irq;
+
+	if (tx_usecs > (255 * 16))
+		return -EINVAL;
+	if (rx_usecs > (255 * 16))
+		return -EINVAL;
+
+	al_eth_set_coalesce(adapter, tx_usecs, rx_usecs);
+
+	return 0;
+}
+
+static int al_eth_nway_reset(struct net_device *netdev)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+	struct phy_device *phydev = adapter->phydev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_start_aneg(phydev);
+}
+
+static u32 al_eth_get_msglevel(struct net_device *netdev)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->msg_enable;
+}
+
+static void al_eth_set_msglevel(struct net_device *netdev, u32 value)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+
+	adapter->msg_enable = value;
+}
+
+static void al_eth_get_stats64(struct net_device *netdev,
+			       struct rtnl_link_stats64 *stats)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+	struct al_eth_mac_stats *mac_stats = &adapter->mac_stats;
+
+	if (!adapter->up)
+		return;
+
+	al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats);
+
+	stats->rx_packets =
+		mac_stats->aFramesReceivedOK; /* including pause frames */
+	stats->tx_packets =
+		mac_stats->aFramesTransmittedOK; /* including pause frames */
+	stats->rx_bytes = mac_stats->aOctetsReceivedOK;
+	stats->tx_bytes = mac_stats->aOctetsTransmittedOK;
+	stats->rx_dropped = 0;
+	stats->multicast = mac_stats->ifInMulticastPkts;
+	stats->collisions = 0;
+
+	stats->rx_length_errors =
+		(mac_stats->etherStatsUndersizePkts + /* good but short */
+		 mac_stats->etherStatsFragments + /* short and bad */
+		 mac_stats->etherStatsJabbers + /* with crc errors */
+		 mac_stats->etherStatsOversizePkts);
+	stats->rx_crc_errors = mac_stats->aFrameCheckSequenceErrors;
+	stats->rx_frame_errors = mac_stats->aAlignmentErrors;
+	stats->rx_fifo_errors = mac_stats->etherStatsDropEvents;
+	stats->rx_missed_errors = 0;
+	stats->tx_window_errors = 0;
+
+	stats->rx_errors = mac_stats->ifInErrors;
+	stats->tx_errors = mac_stats->ifOutErrors;
+}
+
+static void al_eth_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *info)
+{
+	struct al_eth_adapter *adapter = netdev_priv(dev);
+
+	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+	strscpy(info->bus_info, pci_name(adapter->pdev),
+		sizeof(info->bus_info));
+}
+
+static void al_eth_get_ringparam(struct net_device *netdev,
+				 struct ethtool_ringparam *ring,
+				 struct kernel_ethtool_ringparam *kernel_ring,
+				 struct netlink_ext_ack *extack)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+	struct al_eth_ring *tx_ring = &adapter->tx_ring[0];
+	struct al_eth_ring *rx_ring = &adapter->rx_ring[0];
+
+	ring->rx_max_pending = AL_ETH_DEFAULT_RX_DESCS;
+	ring->tx_max_pending = AL_ETH_DEFAULT_TX_SW_DESCS;
+	ring->rx_pending = rx_ring->sw_count;
+	ring->tx_pending = tx_ring->sw_count;
+}
+
+static void al_eth_get_pauseparam(struct net_device *netdev,
+				  struct ethtool_pauseparam *pause)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+	struct al_eth_link_config *link_config = &adapter->link_config;
+
+	pause->autoneg = ((link_config->flow_ctrl_active &
+			   AL_ETH_FLOW_CTRL_AUTONEG) != 0);
+	pause->rx_pause = ((link_config->flow_ctrl_active &
+			    AL_ETH_FLOW_CTRL_RX_PAUSE) != 0);
+	pause->tx_pause = ((link_config->flow_ctrl_active &
+			    AL_ETH_FLOW_CTRL_TX_PAUSE) != 0);
+}
+
+static int
al_eth_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct al_eth_adapter *adapter = netdev_priv(netdev); + struct al_eth_link_config *link_config = &adapter->link_config; + uint32_t newadv; + + /* auto negotiation and receive pause are currently not supported */ + if (pause->autoneg == AUTONEG_ENABLE) + return -EINVAL; + + link_config->flow_ctrl_supported = 0; + + if (pause->rx_pause) { + link_config->flow_ctrl_supported |= AL_ETH_FLOW_CTRL_RX_PAUSE; + + if (pause->tx_pause) { + link_config->flow_ctrl_supported |= + AL_ETH_FLOW_CTRL_TX_PAUSE; + newadv = ADVERTISED_Pause; + } else + newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; + } else if (pause->tx_pause) { + link_config->flow_ctrl_supported |= AL_ETH_FLOW_CTRL_TX_PAUSE; + newadv = ADVERTISED_Asym_Pause; + } else { + newadv = 0; + } + + if (pause->autoneg) { + struct phy_device *phydev; + uint32_t oldadv; + u32 advertising; + + phydev = mdiobus_get_phy(adapter->mdio_bus, adapter->phy_addr); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + phydev->advertising); + + oldadv = advertising & + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); + link_config->flow_ctrl_supported |= AL_ETH_FLOW_CTRL_AUTONEG; + + if (oldadv != newadv) { + advertising &= + ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + advertising |= newadv; + + ethtool_convert_legacy_u32_to_link_mode( + phydev->advertising, advertising); + + if (phydev->autoneg) + return phy_start_aneg(phydev); + } + } else { + link_config->flow_ctrl_active = + link_config->flow_ctrl_supported; + al_eth_flow_ctrl_config(adapter); + } + + return 0; +} + +static int al_eth_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *info, + u32 *rules __always_unused) +{ + /*struct al_eth_adapter *adapter = netdev_priv(netdev);*/ + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = AL_ETH_NUM_QUEUES; + return 0; + /* case ETHTOOL_GRXFH: + return bnx2x_get_rss_flags(bp, info); +*/ + default: + netdev_err(netdev, "Command parameters not supported\n"); + return -EOPNOTSUPP; + } +} + +static u32 al_eth_get_rxfh_indir_size(struct net_device *netdev) +{ + return AL_ETH_RX_RSS_TABLE_SIZE; +} + +static int al_eth_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct al_eth_adapter *adapter = netdev_priv(netdev); + int i; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) + indir[i] = adapter->rss_ind_tbl[i]; + + return 0; +} + +static int al_eth_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct al_eth_adapter *adapter = netdev_priv(netdev); + size_t i; + + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) { + adapter->rss_ind_tbl[i] = indir[i]; + al_eth_thash_table_set(&adapter->hal_adapter, i, 0, indir[i]); + } + + return 0; +} + +static void al_eth_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct al_eth_adapter *adapter = netdev_priv(netdev); + + channels->max_rx = AL_ETH_NUM_QUEUES; + channels->max_tx = AL_ETH_NUM_QUEUES; + channels->max_other = 0; + channels->max_combined = 0; + channels->rx_count = adapter->num_rx_queues; + channels->tx_count = adapter->num_tx_queues; + channels->other_count = 0; + channels->combined_count = 0; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) +static int al_eth_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct al_eth_adapter 
*adapter = netdev_priv(netdev);
+	struct al_eth_eee_params params;
+
+	if (!adapter->phy_exist)
+		return -EOPNOTSUPP;
+
+	al_eth_eee_get(&adapter->hal_adapter, &params);
+
+	edata->eee_enabled = params.enable;
+	edata->tx_lpi_timer = params.tx_eee_timer;
+
+	return phy_ethtool_get_eee(adapter->phydev, edata);
+}
+
+static int al_eth_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+	struct al_eth_eee_params params;
+
+	struct phy_device *phydev;
+
+	if (!adapter->phy_exist)
+		return -EOPNOTSUPP;
+
+	phydev = mdiobus_get_phy(adapter->mdio_bus, adapter->phy_addr);
+
+	phy_init_eee(phydev, 1);
+
+	params.enable = edata->eee_enabled;
+	params.tx_eee_timer = edata->tx_lpi_timer;
+	params.min_interval = 10;
+
+	al_eth_eee_config(&adapter->hal_adapter, &params);
+
+	return phy_ethtool_set_eee(phydev, edata);
+}
+#endif
+
+static void al_eth_get_wol(struct net_device *netdev,
+			   struct ethtool_wolinfo *wol)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+	struct phy_device *phydev;
+
+	wol->wolopts = adapter->wol;
+
+	if ((adapter) && (adapter->phy_exist) && (adapter->mdio_bus)) {
+		phydev = mdiobus_get_phy(adapter->mdio_bus, adapter->phy_addr);
+		if (phydev) {
+			phy_ethtool_get_wol(phydev, wol);
+			wol->supported |= WAKE_PHY;
+			return;
+		}
+	}
+
+	wol->supported |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
+}
+
+static int al_eth_set_wol(struct net_device *netdev,
+			  struct ethtool_wolinfo *wol)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+	struct phy_device *phydev;
+
+	if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
+		return -EOPNOTSUPP;
+
+	adapter->wol = wol->wolopts;
+
+	if ((adapter) && (adapter->phy_exist) && (adapter->mdio_bus)) {
+		phydev = mdiobus_get_phy(adapter->mdio_bus, adapter->phy_addr);
+		if (phydev)
+			return phy_ethtool_set_wol(phydev, wol);
+	}
+
+	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+	return 0;
+}
+
+static struct {
+	const char str[ETH_GSTRING_LEN];
+} al_ethtool_stats_keys[] = {
+	{ "mac_aOctetsReceivedOK" },
+	{ "mac_aOctetsTransmittedOK" },
+	{ "mac_etherStatsPkts" },
+	{ "mac_ifInUcastPkts" },
+	{ "mac_ifInMulticastPkts" },
+	{ "mac_ifInBroadcastPkts" },
+	{ "mac_ifInErrors" },
+	{ "mac_ifOutUcastPkts" },
+	{ "mac_ifOutMulticastPkts" },
+	{ "mac_ifOutBroadcastPkts" },
+	{ "mac_ifOutErrors" },
+	{ "mac_aFramesReceivedOK" },
+	{ "mac_aFramesTransmittedOK" },
+	{ "mac_etherStatsUndersizePkts" },
+	{ "mac_etherStatsFragments" },
+	{ "mac_etherStatsJabbers" },
+	{ "mac_etherStatsOversizePkts" },
+	{ "mac_aFrameCheckSequenceErrors" },
+	{ "mac_aAlignmentErrors" },
+	{ "mac_etherStatsDropEvents" },
+	{ "mac_aPAUSEMACCtrlTxFrames" },
+	{ "mac_aPAUSEMACCtrlRxFrames" },
+	{ "mac_aFrameTooLongErrors" },
+	{ "mac_aInRangeLengthErrors" },
+	{ "mac_VLANTransmittedOK" },
+	{ "mac_VLANReceivedOK" },
+	{ "mac_etherStatsOctets" },
+	{ "mac_etherStatsPkts64" },
+	{ "mac_etherStatsPkts65to127" },
+	{ "mac_etherStatsPkts128to255" },
+	{ "mac_etherStatsPkts256to511" },
+	{ "mac_etherStatsPkts512to1023" },
+	{ "mac_etherStatsPkts1024to1518" },
+	{ "mac_etherStatsPkts1519toX" },
+	{ "mac_eee_in" },
+	{ "mac_eee_out" },
+
+	{ "ec_faf_in_rx_pkt" },
+	{ "ec_faf_in_rx_short" },
+	{ "ec_faf_in_rx_long" },
+	{ "ec_faf_out_rx_pkt" },
+	{ "ec_faf_out_rx_short" },
+	{ "ec_faf_out_rx_long" },
+	{ "ec_faf_out_drop" },
+	{ "ec_rxf_in_rx_pkt" },
+	{ "ec_rxf_in_fifo_err" },
+	{ "ec_lbf_in_rx_pkt" },
+	{ "ec_lbf_in_fifo_err" },
+	{ "ec_rxf_out_rx_1_pkt" },
+	{ "ec_rxf_out_rx_2_pkt" },
+	{
"ec_rxf_out_drop_1_pkt" }, + { "ec_rxf_out_drop_2_pkt" }, + { "ec_rpe_1_in_rx_pkt" }, + { "ec_rpe_1_out_rx_pkt" }, + { "ec_rpe_2_in_rx_pkt" }, + { "ec_rpe_2_out_rx_pkt" }, + { "ec_rpe_3_in_rx_pkt" }, + { "ec_rpe_3_out_rx_pkt" }, + { "ec_tpe_in_tx_pkt" }, + { "ec_tpe_out_tx_pkt" }, + { "ec_tpm_tx_pkt" }, + { "ec_tfw_in_tx_pkt" }, + { "ec_tfw_out_tx_pkt" }, + { "ec_rfw_in_rx_pkt" }, + { "ec_rfw_in_vlan_drop" }, + { "ec_rfw_in_parse_drop" }, + { "ec_rfw_in_mc" }, + { "ec_rfw_in_bc" }, + { "ec_rfw_in_vlan_exist" }, + { "ec_rfw_in_vlan_nexist" }, + { "ec_rfw_in_mac_drop" }, + { "ec_rfw_in_mac_ndet_drop" }, + { "ec_rfw_in_ctrl_drop" }, + { "ec_rfw_in_prot_i_drop" }, + { "ec_eee_in" }, +}; + +static void al_eth_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + memcpy(data, al_ethtool_stats_keys, sizeof(al_ethtool_stats_keys)); +} + +static int al_eth_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(al_ethtool_stats_keys); + default: + return -EOPNOTSUPP; + } +} + +static void al_eth_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct al_eth_adapter *adapter = netdev_priv(netdev); + struct al_eth_mac_stats *mac_stats = &adapter->mac_stats; + struct al_eth_ec_stats *ec_stats = &adapter->ec_stats; + int i = 0; + + al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats); + al_eth_ec_stats_get(&adapter->hal_adapter, ec_stats); + + data[i++] = mac_stats->aOctetsReceivedOK; + data[i++] = mac_stats->aOctetsTransmittedOK; + data[i++] = mac_stats->etherStatsPkts; + data[i++] = mac_stats->ifInUcastPkts; + data[i++] = mac_stats->ifInMulticastPkts; + data[i++] = mac_stats->ifInBroadcastPkts; + data[i++] = mac_stats->ifInErrors; + data[i++] = mac_stats->ifOutUcastPkts; + data[i++] = mac_stats->ifOutMulticastPkts; + data[i++] = mac_stats->ifOutBroadcastPkts; + data[i++] = mac_stats->ifOutErrors; + data[i++] = mac_stats->aFramesReceivedOK; + data[i++] = mac_stats->aFramesTransmittedOK; + data[i++] = mac_stats->etherStatsUndersizePkts; + data[i++] = mac_stats->etherStatsFragments; + data[i++] = mac_stats->etherStatsJabbers; + data[i++] = mac_stats->etherStatsOversizePkts; + data[i++] = mac_stats->aFrameCheckSequenceErrors; + data[i++] = mac_stats->aAlignmentErrors; + data[i++] = mac_stats->etherStatsDropEvents; + data[i++] = mac_stats->aPAUSEMACCtrlFramesTransmitted; + data[i++] = mac_stats->aPAUSEMACCtrlFramesReceived; + data[i++] = mac_stats->aFrameTooLongErrors; + data[i++] = mac_stats->aInRangeLengthErrors; + data[i++] = mac_stats->VLANTransmittedOK; + data[i++] = mac_stats->VLANReceivedOK; + data[i++] = mac_stats->etherStatsOctets; + data[i++] = mac_stats->etherStatsPkts64Octets; + data[i++] = mac_stats->etherStatsPkts65to127Octets; + data[i++] = mac_stats->etherStatsPkts128to255Octets; + data[i++] = mac_stats->etherStatsPkts256to511Octets; + data[i++] = mac_stats->etherStatsPkts512to1023Octets; + data[i++] = mac_stats->etherStatsPkts1024to1518Octets; + data[i++] = mac_stats->etherStatsPkts1519toX; + data[i++] = mac_stats->eee_in; + data[i++] = mac_stats->eee_out; + + data[i++] = ec_stats->faf_in_rx_pkt; + data[i++] = ec_stats->faf_in_rx_short; + data[i++] = ec_stats->faf_in_rx_long; + data[i++] = ec_stats->faf_out_rx_pkt; + data[i++] = ec_stats->faf_out_rx_short; + data[i++] = ec_stats->faf_out_rx_long; + data[i++] = ec_stats->faf_out_drop; + data[i++] = ec_stats->rxf_in_rx_pkt; + data[i++] = ec_stats->rxf_in_fifo_err; + data[i++] = ec_stats->lbf_in_rx_pkt; + data[i++] = 
ec_stats->lbf_in_fifo_err; + data[i++] = ec_stats->rxf_out_rx_1_pkt; + data[i++] = ec_stats->rxf_out_rx_2_pkt; + data[i++] = ec_stats->rxf_out_drop_1_pkt; + data[i++] = ec_stats->rxf_out_drop_2_pkt; + data[i++] = ec_stats->rpe_1_in_rx_pkt; + data[i++] = ec_stats->rpe_1_out_rx_pkt; + data[i++] = ec_stats->rpe_2_in_rx_pkt; + data[i++] = ec_stats->rpe_2_out_rx_pkt; + data[i++] = ec_stats->rpe_3_in_rx_pkt; + data[i++] = ec_stats->rpe_3_out_rx_pkt; + data[i++] = ec_stats->tpe_in_tx_pkt; + data[i++] = ec_stats->tpe_out_tx_pkt; + data[i++] = ec_stats->tpm_tx_pkt; + data[i++] = ec_stats->tfw_in_tx_pkt; + data[i++] = ec_stats->tfw_out_tx_pkt; + data[i++] = ec_stats->rfw_in_rx_pkt; + data[i++] = ec_stats->rfw_in_vlan_drop; + data[i++] = ec_stats->rfw_in_parse_drop; + data[i++] = ec_stats->rfw_in_mc; + data[i++] = ec_stats->rfw_in_bc; + data[i++] = ec_stats->rfw_in_vlan_exist; + data[i++] = ec_stats->rfw_in_vlan_nexist; + data[i++] = ec_stats->rfw_in_mac_drop; + data[i++] = ec_stats->rfw_in_mac_ndet_drop; + data[i++] = ec_stats->rfw_in_ctrl_drop; + data[i++] = ec_stats->rfw_in_prot_i_drop; + data[i++] = ec_stats->eee_in; +} + +static const struct ethtool_ops al_eth_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_link_ksettings = al_eth_get_link_ksettings, + .set_link_ksettings = al_eth_set_link_ksettings, + .get_drvinfo = al_eth_get_drvinfo, + .get_wol = al_eth_get_wol, + .set_wol = al_eth_set_wol, + .get_msglevel = al_eth_get_msglevel, + .set_msglevel = al_eth_set_msglevel, + .nway_reset = al_eth_nway_reset, + .get_link = ethtool_op_get_link, + .get_coalesce = al_eth_get_coalesce, + .set_coalesce = al_eth_ethtool_set_coalesce, + .get_ringparam = al_eth_get_ringparam, + .get_pauseparam = al_eth_get_pauseparam, + .set_pauseparam = al_eth_set_pauseparam, + .get_strings = al_eth_get_strings, + .get_ethtool_stats = al_eth_get_ethtool_stats, + .get_rxnfc = al_eth_get_rxnfc, + .get_sset_count = al_eth_get_sset_count, + .get_rxfh_indir_size = al_eth_get_rxfh_indir_size, + .get_rxfh = al_eth_get_rxfh, + .set_rxfh = al_eth_set_rxfh, + .get_channels = al_eth_get_channels, + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + .get_eee = al_eth_get_eee, + .set_eee = al_eth_set_eee, +#endif + +}; + +static void al_eth_tx_csum(struct al_eth_ring *tx_ring, + struct al_eth_tx_buffer *tx_info, + struct al_eth_pkt *hal_pkt, struct sk_buff *skb) +{ + u32 mss = skb_shinfo(skb)->gso_size; + if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) { + struct al_eth_meta_data *meta = &tx_ring->hal_meta; + if (mss) + hal_pkt->flags |= AL_ETH_TX_FLAGS_TSO | + AL_ETH_TX_FLAGS_L4_CSUM; + else + hal_pkt->flags |= AL_ETH_TX_FLAGS_L4_CSUM | + AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM; + + switch (skb->protocol) { + case __constant_htons(ETH_P_IP): + hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv4; + if (mss) + hal_pkt->flags |= AL_ETH_TX_FLAGS_IPV4_L3_CSUM; + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP; + else + hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP; + break; + case __constant_htons(ETH_P_IPV6): + hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv6; + /* TODO: add support for csum offloading for ipv6 with options */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP; + else + hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP; + break; + default: + break; + } + + meta->words_valid = 4; + meta->l3_header_len = skb_network_header_len(skb); + meta->l3_header_offset = skb_network_offset(skb); + meta->l4_header_len = + 
tcp_hdr(skb)->doff; /* this param needed only for TSO */
+		meta->mss_idx_sel = 0; /* TODO: check how to select MSS */
+		meta->mss_val = skb_shinfo(skb)->gso_size;
+		hal_pkt->meta = meta;
+	} else
+		hal_pkt->meta = NULL;
+}
+
+/* Called with netif_tx_lock.
+ */
+static netdev_tx_t al_eth_start_xmit(struct sk_buff *skb,
+				     struct net_device *dev)
+{
+	struct al_eth_adapter *adapter = netdev_priv(dev);
+	dma_addr_t dma;
+	struct al_eth_tx_buffer *tx_info;
+	struct al_eth_pkt *hal_pkt;
+	struct al_buf *al_buf;
+	u32 len, last_frag;
+	u16 next_to_use;
+	int i, qid;
+	struct al_eth_ring *tx_ring;
+	struct netdev_queue *txq;
+
+	netdev_dbg(adapter->netdev, "%s skb %p\n", __func__, skb);
+	/* Determine which tx ring we will be placed on */
+	qid = skb_get_queue_mapping(skb);
+	tx_ring = &adapter->tx_ring[qid];
+	txq = netdev_get_tx_queue(dev, qid);
+
+	len = skb_headlen(skb);
+
+	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(tx_ring->dev, dma)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	next_to_use = tx_ring->next_to_use;
+	tx_info = &tx_ring->tx_buffer_info[next_to_use];
+	tx_info->skb = skb;
+	hal_pkt = &tx_info->hal_pkt;
+
+	/* set flags and meta data */
+	hal_pkt->flags = AL_ETH_TX_FLAGS_INT;
+	al_eth_tx_csum(tx_ring, tx_info, hal_pkt, skb);
+
+	al_buf = hal_pkt->bufs;
+
+	dma_unmap_addr_set(al_buf, addr, dma);
+	dma_unmap_len_set(al_buf, len, len);
+
+	last_frag = skb_shinfo(skb)->nr_frags;
+
+	for (i = 0; i < last_frag; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		al_buf++;
+
+		len = skb_frag_size(frag);
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+		dma_unmap_addr_set(al_buf, addr, dma);
+		dma_unmap_len_set(al_buf, len, len);
+	}
+
+	hal_pkt->num_of_bufs = 1 + last_frag;
+	if (unlikely(last_frag > (AL_ETH_PKT_MAX_BUFS - 2))) {
+		netdev_err(adapter->netdev,
+			   "too many descriptors: last_frag %d!\n", last_frag);
+		for (i = 0; i <= last_frag; i++)
+			netdev_err(adapter->netdev,
+				   "frag[%d]: addr:0x%llx, len 0x%x\n", i,
+				   (unsigned long long)hal_pkt->bufs[i].addr,
+				   hal_pkt->bufs[i].len);
+		BUG();
+	}
+	netdev_tx_sent_queue(txq, skb->len);
+
+	/*smp_wmb();*/ /* commit the item before incrementing the head */
+	tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use);
+
+	/* prepare the packet's descriptors to dma engine */
+	tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt);
+
+	/* stop the queue when no more space available, the packet can have up
+	 * to MAX_SKB_FRAGS + 1 buffers and a meta descriptor */
+	if (unlikely(al_udma_available_get(tx_ring->dma_q) <
+		     (MAX_SKB_FRAGS + 2))) {
+		dev_dbg(&adapter->pdev->dev, "%s stop queue %d\n", __func__,
+			qid);
+		netif_tx_stop_queue(txq);
+	}
+
+	/* trigger the dma engine */
+	al_eth_tx_dma_action(tx_ring->dma_q, tx_info->tx_descs);
+
+	return NETDEV_TX_OK;
+
+dma_error:
+	/* save value of frag that failed */
+	last_frag = i;
+
+	/* start back at beginning and unmap skb */
+	tx_info->skb = NULL;
+	al_buf = hal_pkt->bufs;
+	dma_unmap_single(tx_ring->dev, dma_unmap_addr(al_buf, addr),
+			 dma_unmap_len(al_buf, len), DMA_TO_DEVICE);
+
+	/* unmap remaining mapped pages */
+	for (i = 0; i < last_frag; i++) {
+		al_buf++;
+		dma_unmap_page(tx_ring->dev, dma_unmap_addr(al_buf, addr),
+			       dma_unmap_len(al_buf, len), DMA_TO_DEVICE);
+	}
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+/* Return the subqueue id for this core (one per core).
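+ * Assuming one TX queue was allocated per core, as on the 4-core Alpine
+ * SoCs, CPU n always transmits on TX queue n, so no two cores contend
+ * for the same queue lock.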
*/
+static u16 al_eth_select_queue(struct net_device *dev, struct sk_buff *skb,
+			       struct net_device *sb_dev)
+{
+#ifdef CONFIG_ARCH_ALPINE
+	return smp_processor_id();
+#else
+	return smp_processor_id() % AL_ETH_NUM_QUEUES;
+#endif
+}
+
+static int al_eth_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct al_eth_adapter *adapter = netdev_priv(dev);
+	struct sockaddr *addr = p;
+	int err = 0;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	dev_addr_set(dev, addr->sa_data);
+	memcpy(adapter->mac_addr, addr->sa_data, dev->addr_len);
+	al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE,
+				     adapter->mac_addr, 1);
+
+	if (!netif_running(dev))
+		return 0;
+
+	return err;
+}
+
+/**
+ * al_eth_set_rx_mode - Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void al_eth_set_rx_mode(struct net_device *netdev)
+{
+	struct al_eth_adapter *adapter = netdev_priv(netdev);
+
+	if (netdev->flags & IFF_PROMISC) {
+		al_eth_mac_table_promiscuous_set(adapter, true);
+	} else {
+		if (netdev->flags & IFF_ALLMULTI) {
+			al_eth_mac_table_all_multicast_add(
+				adapter, AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
+		} else {
+			if (netdev_mc_empty(netdev))
+				al_eth_mac_table_entry_clear(
+					adapter,
+					AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX);
+			else
+				al_eth_mac_table_all_multicast_add(
+					adapter,
+					AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
+		}
+
+		if (!netdev_uc_empty(netdev)) {
+			struct netdev_hw_addr *ha;
+			uint8_t i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1;
+
+			if (netdev_uc_count(netdev) >
+			    AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) {
+				/* In this case there are more addresses than
+				 * entries in the mac table - set promiscuous */
+				al_eth_mac_table_promiscuous_set(adapter, true);
+				return;
+			}
+
+			/* clear the last configuration */
+			while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1 +
+				    AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) {
+				al_eth_mac_table_entry_clear(adapter, i);
+				i++;
+			}
+
+			/* set new addresses */
+			i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1;
+			netdev_for_each_uc_addr(ha, netdev) {
+				al_eth_mac_table_unicast_add(adapter, i,
+							     ha->addr, 1);
+				i++;
+			}
+		}
+
+		al_eth_mac_table_promiscuous_set(adapter, false);
+	}
+}
+
+static const struct net_device_ops al_eth_netdev_ops = {
+	.ndo_open = al_eth_open,
+	.ndo_stop = al_eth_close,
+	.ndo_start_xmit = al_eth_start_xmit,
+	.ndo_select_queue = al_eth_select_queue,
+	.ndo_get_stats64 = al_eth_get_stats64,
+	.ndo_eth_ioctl = al_eth_ioctl,
+	.ndo_tx_timeout = al_eth_tx_timeout,
+	.ndo_change_mtu = al_eth_change_mtu,
+	.ndo_set_mac_address = al_eth_set_mac_addr,
+	.ndo_set_rx_mode = al_eth_set_rx_mode,
+#if 0
+	.ndo_validate_addr = eth_validate_addr,
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = al_eth_netpoll,
+#endif
+#endif
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer = al_eth_flow_steer,
+#endif
+	.ndo_set_features = al_set_features,
+};
+
+/**
+ * al_eth_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in al_eth_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * al_eth_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
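+ *
+ * On ALPINE_INTEGRATED boards the UDMA, EC and MAC register files come
+ * from three separate BARs; on the NIC variants they are carved out of
+ * a single BAR at fixed offsets (see the layout comment in the body).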
+ **/ +static int al_eth_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int version_printed; + struct net_device *netdev; + struct al_eth_adapter *adapter; + void __iomem *const *iomap; + struct al_hal_eth_adapter *hal_adapter; + static int adapters_found; + u16 dev_id; + u8 rev_id; + int i; + + int rc; + + dev_dbg(&pdev->dev, "%s\n", __func__); + + if (version_printed++ == 0) + pr_info("%s", version); + + rc = pcim_enable_device(pdev); + if (rc) { + dev_err(&pdev->dev, "pcim_enable_device failed!\n"); + return rc; + } + + if (ent->driver_data == ALPINE_INTEGRATED) + rc = pcim_iomap_regions(pdev, (1 << 0) | (1 << 2) | (1 << 4), + DRV_MODULE_NAME); + else + rc = pcim_iomap_regions(pdev, + (1 << board_info[ent->driver_data].bar), + DRV_MODULE_NAME); + + if (rc) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed 0x%x\n", rc); + return rc; + } + + iomap = pcim_iomap_table(pdev); + if (!iomap) { + dev_err(&pdev->dev, "pcim_iomap_table failed\n"); + return -ENOMEM; + } + + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (rc) { + dev_err(&pdev->dev, "dma_set_mask_and_coherent failed 0x%x\n", + rc); + return rc; + } + + pci_set_master(pdev); + pci_save_state(pdev); + + /* dev zeroed in init_etherdev */ + netdev = alloc_etherdev_mq(sizeof(struct al_eth_adapter), + AL_ETH_NUM_QUEUES); + if (!netdev) { + dev_err(&pdev->dev, "alloc_etherdev_mq failed\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + pci_set_drvdata(pdev, adapter); + + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->board_type = ent->driver_data; + hal_adapter = &adapter->hal_adapter; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + if (adapter->board_type == ALPINE_INTEGRATED) { + adapter->udma_base = iomap[AL_ETH_UDMA_BAR]; + adapter->ec_base = iomap[AL_ETH_EC_BAR]; + adapter->mac_base = iomap[AL_ETH_MAC_BAR]; + } else { + u16 adapter_pci_cmd; + + /* + * pci adapter configuration space: 0-4K + * BAR0-ETH_CTL: 20K-36K (start offset 0x5000) + * BAR1-MAC_CTL: 36K-40K (start offset 0x9000) + * BAR2-UDMA: 128K-256K + */ + adapter->internal_pcie_base = + iomap[board_info[ent->driver_data].bar]; + adapter->udma_base = + iomap[board_info[ent->driver_data].bar] + 128 * 0x400; + adapter->ec_base = + iomap[board_info[ent->driver_data].bar] + 20 * 0x400; + adapter->mac_base = + iomap[board_info[ent->driver_data].bar] + 36 * 0x400; + /* enable master/slave in the adapter conf */ + adapter_pci_cmd = + readw(adapter->internal_pcie_base + PCI_COMMAND); + adapter_pci_cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; + adapter_pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + + writew(adapter_pci_cmd, + adapter->internal_pcie_base + PCI_COMMAND); + } + + if (adapter->board_type == ALPINE_INTEGRATED) { + pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id); + pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); + } else { + dev_id = readw(adapter->internal_pcie_base + PCI_DEVICE_ID); + rev_id = readb(adapter->internal_pcie_base + PCI_REVISION_ID); + } + + adapter->rev_id = rev_id; + adapter->dev_id = dev_id; + adapter->id_number = adapters_found; + + /* set default ring sizes */ + adapter->tx_ring_count = AL_ETH_DEFAULT_TX_SW_DESCS; + adapter->tx_descs_count = AL_ETH_DEFAULT_TX_HW_DESCS; + adapter->rx_ring_count = AL_ETH_DEFAULT_RX_DESCS; + adapter->rx_descs_count = AL_ETH_DEFAULT_RX_DESCS; + + adapter->num_tx_queues = AL_ETH_NUM_QUEUES; + adapter->num_rx_queues = AL_ETH_NUM_QUEUES; + + adapter->small_copy_len = 
AL_ETH_DEFAULT_SMALL_PACKET_LEN;
+	adapter->link_poll_interval = AL_ETH_DEFAULT_LINK_POLL_INTERVAL;
+	adapter->max_rx_buff_alloc_size = AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE;
+	adapter->link_config.force_1000_base_x =
+		AL_ETH_DEFAULT_FORCE_1000_BASEX;
+
+	spin_lock_init(&adapter->serdes_config_lock);
+
+	snprintf(adapter->name, AL_ETH_NAME_MAX_LEN, "al_eth_%d",
+		 adapter->id_number);
+	rc = al_eth_board_params_init(adapter);
+	if (rc)
+		goto fail_free_netdev;
+
+	al_eth_function_reset(adapter);
+
+	rc = al_eth_hw_init(adapter);
+	if (rc)
+		goto fail_free_netdev;
+
+	al_eth_init_rings(adapter);
+
+	netdev->netdev_ops = &al_eth_netdev_ops;
+	netdev->watchdog_timeo = TX_TIMEOUT;
+	netdev->ethtool_ops = &al_eth_ethtool_ops;
+
+	if (!is_valid_ether_addr(adapter->mac_addr)) {
+		eth_hw_addr_random(netdev);
+		memcpy(adapter->mac_addr, netdev->dev_addr, ETH_ALEN);
+	} else {
+		dev_addr_set(netdev, adapter->mac_addr);
+	}
+
+	memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
+
+	netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			      NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+			      NETIF_F_RXCSUM | NETIF_F_NTUPLE | NETIF_F_RXHASH |
+#ifdef CONFIG_NET_MQ_TX_LOCK_OPT
+			      NETIF_F_MQ_TX_LOCK_OPT |
+#endif
+			      NETIF_F_HIGHDMA;
+
+	netdev->features = netdev->hw_features;
+#if defined(NETIF_F_MQ_TX_LOCK_OPT)
+	netdev->features &= ~NETIF_F_MQ_TX_LOCK_OPT;
+#endif
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+
+	for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++)
+		adapter->rss_ind_tbl[i] =
+			ethtool_rxfh_indir_default(i, AL_ETH_NUM_QUEUES);
+
+	rc = register_netdev(netdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot register net device\n");
+		goto fail_free_netdev;
+	}
+
+	rc = al_eth_sysfs_init(&adapter->pdev->dev);
+	if (rc)
+		goto fail_unregister_netdev;
+
+	rc = al_eth_sw_mdio_probe(adapter);
+	if (rc)
+		goto fail_unregister_netdev;
+
+	netdev_info(netdev, "%s found at mem %lx, mac addr %pM\n",
+		    board_info[ent->driver_data].name,
+		    (long)pci_resource_start(pdev, 0), netdev->dev_addr);
+
+	adapters_found++;
+
+	return 0;
+
+fail_unregister_netdev:
+	unregister_netdev(netdev);
+
+fail_free_netdev:
+	free_netdev(netdev);
+
+	return rc;
+}
+
+/**
+ * al_eth_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * al_eth_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ **/
+static void al_eth_remove(struct pci_dev *pdev)
+{
+	struct al_eth_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *dev = adapter->netdev;
+
+	al_eth_sw_mdio_remove(adapter);
+
+	/* disable forwarding interrupts from eth through pci end point */
+	if (adapter->board_type == ALPINE_FPGA_NIC)
+		writel(0, adapter->internal_pcie_base + 0x1800000 + 0x1210);
+
+	unregister_netdev(dev);
+
+	al_eth_sysfs_terminate(&pdev->dev);
+
+	free_netdev(dev);
+
+	pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+static int al_eth_resume(struct pci_dev *pdev)
+{
+	struct al_eth_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	/*
+	 * pci_restore_state clears dev->state_saved so call
+	 * pci_save_state to restore it.
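+	 * Otherwise a second suspend/resume cycle would find no saved
+	 * state to restore.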
+	 */
+	pci_save_state(pdev);
+
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		pr_err("Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_wake_from_d3(pdev, false);
+
+	if (adapter->rev_id > AL_ETH_REV_ID_0) {
+		al_eth_wol_disable(&adapter->hal_adapter);
+	} else {
+		al_eth_mac_table_unicast_add(adapter,
+					     AL_ETH_MAC_TABLE_UNICAST_IDX_BASE,
+					     adapter->mac_addr, 1);
+		al_eth_mac_table_broadcast_add(
+			adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, 1);
+		al_eth_mac_table_promiscuous_set(adapter, false);
+		al_eth_set_rx_mode(netdev);
+	}
+
+	netif_device_attach(netdev);
+
+	return 0;
+}
+
+static int al_eth_wol_config(struct al_eth_adapter *adapter)
+{
+	if (adapter->rev_id > AL_ETH_REV_ID_0) {
+		struct al_eth_wol_params wol = { 0 };
+
+		if (adapter->wol & WAKE_UCAST) {
+			wol.int_mask |= AL_ETH_WOL_INT_UNICAST;
+			wol.forward_mask |= AL_ETH_WOL_FWRD_UNICAST;
+		}
+
+		if (adapter->wol & WAKE_MCAST) {
+			wol.int_mask |= AL_ETH_WOL_INT_MULTICAST;
+			wol.forward_mask |= AL_ETH_WOL_FWRD_MULTICAST;
+		}
+
+		if (adapter->wol & WAKE_BCAST) {
+			wol.int_mask |= AL_ETH_WOL_INT_BROADCAST;
+			wol.forward_mask |= AL_ETH_WOL_FWRD_BROADCAST;
+		}
+
+		if (wol.int_mask != 0) {
+			al_eth_wol_enable(&adapter->hal_adapter, &wol);
+			return 1;
+		}
+
+		return 0;
+	}
+
+	if (!(adapter->wol & WAKE_UCAST)) {
+		int i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE;
+
+		/* clear all unicast addresses */
+		while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1 +
+			    AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) {
+			al_eth_mac_table_entry_clear(adapter, i);
+			i++;
+		}
+	}
+
+	if (!(adapter->wol & WAKE_MCAST))
+		al_eth_mac_table_entry_clear(
+			adapter, AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX);
+
+	if (!(adapter->wol & WAKE_BCAST))
+		al_eth_mac_table_entry_clear(adapter,
+					     AL_ETH_MAC_TABLE_BROADCAST_IDX);
+
+	if (adapter->wol)
+		return 1;
+
+	return 0;
+}
+
+static int al_eth_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct al_eth_adapter *adapter = pci_get_drvdata(pdev);
+
+	if (al_eth_wol_config(adapter)) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver al_eth_pci_driver = {
+	.name = DRV_MODULE_NAME,
+	.id_table = al_eth_pci_tbl,
+	.probe = al_eth_probe,
+	.remove = al_eth_remove,
+#ifdef CONFIG_PM
+	.suspend = al_eth_suspend,
+	.resume = al_eth_resume,
+#endif
+};
+
+static int __init al_eth_init(void)
+{
+#ifdef CONFIG_AL_ETH_ALLOC_SKB
+	struct sk_buff_head *rx_rc;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		rx_rc = &per_cpu(rx_recycle_cache, cpu);
+		skb_queue_head_init(rx_rc);
+	}
+#endif
+	return pci_register_driver(&al_eth_pci_driver);
+}
+
+static void __exit al_eth_cleanup(void)
+{
+#ifdef CONFIG_AL_ETH_ALLOC_SKB
+	struct sk_buff_head *rx_rc;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		rx_rc = &per_cpu(rx_recycle_cache, cpu);
+		skb_queue_purge(rx_rc);
+	}
+#endif
+	pci_unregister_driver(&al_eth_pci_driver);
+}
+
+module_init(al_eth_init);
+module_exit(al_eth_cleanup);
diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_eth.h b/target/linux/alpine/files/drivers/net/ethernet/al/al_eth.h
new file mode 100644
index 00000000000000..63e31e7c5eb680
--- /dev/null
+++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_eth.h
@@ -0,0 +1,337 @@
+/*
+ * al_eth.h: AnnapurnaLabs Unified 1GbE and 10GbE ethernet driver header.
+ *
+ * Copyright (C) 2014 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef AL_ETH_H
+#define AL_ETH_H
+
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include "al_hal_eth.h"
+#include "al_init_eth_lm.h"
+
+enum board_t {
+	ALPINE_INTEGRATED = 0,
+	ALPINE_NIC = 1,
+	ALPINE_FPGA_NIC = 2,
+};
+
+#define AL_ETH_MAX_HW_QUEUES	4
+#define AL_ETH_NUM_QUEUES	4
+#define AL_ETH_MAX_MSIX_VEC	(1 + 2 * AL_ETH_MAX_HW_QUEUES)
+
+#define AL_ETH_DEFAULT_TX_SW_DESCS	(512)
+#define AL_ETH_DEFAULT_TX_HW_DESCS	(512)
+#define AL_ETH_DEFAULT_RX_DESCS		(512)
+
+#if ((AL_ETH_DEFAULT_TX_SW_DESCS / 4) < (MAX_SKB_FRAGS + 2))
+#define AL_ETH_TX_WAKEUP_THRESH (AL_ETH_DEFAULT_TX_SW_DESCS / 4)
+#else
+#define AL_ETH_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
+#endif
+#define AL_ETH_DEFAULT_SMALL_PACKET_LEN	(256 - NET_IP_ALIGN)
+#define AL_ETH_HEADER_COPY_SIZE		(256 - NET_IP_ALIGN)
+
+#define AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE 1536
+/*
+ * Keep the minimum buffer size at 600 so that an MTU change from a very
+ * small buffer to a very large one cannot push the number of buffers per
+ * packet past the maximum AL_ETH_PKT_MAX_BUFS
+ */
+#define AL_ETH_DEFAULT_MIN_RX_BUFF_ALLOC_SIZE 600
+#define AL_ETH_DEFAULT_FORCE_1000_BASEX AL_FALSE
+
+#define AL_ETH_DEFAULT_LINK_POLL_INTERVAL	100
+#define AL_ETH_FIRST_LINK_POLL_INTERVAL		1
+
+#define AL_ETH_NAME_MAX_LEN	20
+#define AL_ETH_IRQNAME_SIZE	40
+
+#define AL_ETH_DEFAULT_MDIO_FREQ_KHZ	2500
+
+#define AL_ETH_MAX_MTU			9216
+
+struct al_eth_irq {
+	irq_handler_t handler;
+	void *data;
+	unsigned int vector;
+	u8 requested;
+	cpumask_t affinity_hint_mask;
+	char name[AL_ETH_IRQNAME_SIZE];
+};
+
+struct al_eth_napi {
+	struct napi_struct napi ____cacheline_aligned;
+	struct al_eth_adapter *adapter;
+	unsigned int qid;
+};
+
+struct al_eth_tx_buffer {
+	struct sk_buff *skb;
+	struct al_eth_pkt hal_pkt;
+	unsigned int tx_descs;
+};
+
+struct al_eth_rx_buffer {
+	struct sk_buff *skb;
+	struct page *page;
+	unsigned int page_offset;
+#if (defined(CONFIG_AL_ETH_ALLOC_FRAG) || defined(CONFIG_AL_ETH_ALLOC_SKB))
+	u8 *data;
+	unsigned int data_size;
+	unsigned int frag_size; /* used in rx skb allocation */
+#endif
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	struct al_buf al_buf;
+};
+
+#define AL_ETH_RX_OFFSET	(NET_SKB_PAD + NET_IP_ALIGN)
+
+struct al_eth_ring {
+	struct device *dev;
+	struct napi_struct *napi;
+	struct al_eth_pkt hal_pkt; /* used to get rx packets from hal */
+	struct al_udma_q *dma_q; /* udma queue handler */
+	u16 next_to_use;
+	u16 next_to_clean;
+	u32 __iomem *unmask_reg_offset; /* the offset of the interrupt unmask register */
+	u32 unmask_val; /* the value to write to the above register to
+			 * unmask the interrupt of this ring
+			 */
+	/* need to use union here */
+	struct al_eth_meta_data hal_meta;
+	struct al_eth_tx_buffer *tx_buffer_info; /* context of tx packet */
+	struct al_eth_rx_buffer *rx_buffer_info; /* context of rx packet */
+	int sw_count; /* number of tx/rx_buffer_info's entries */
+	int hw_count; /* number of hw descriptors */
+	size_t descs_size; /* size (in bytes) of hw descriptors */
+	size_t cdescs_size; /* size (in bytes) of hw completion descriptors,
+			     * used for rx
+			     */
+	struct net_device *netdev;
+	struct al_udma_q_params q_params;
+};
+
+
+#define AL_ETH_TX_RING_IDX_NEXT(tx_ring, idx) (((idx) + 1) & (AL_ETH_DEFAULT_TX_SW_DESCS - 1))
+
+#define AL_ETH_RX_RING_IDX_NEXT(rx_ring, idx) (((idx) + 1) & (AL_ETH_DEFAULT_RX_DESCS - 1))
+#define AL_ETH_RX_RING_IDX_ADD(rx_ring, idx, n) (((idx) + (n)) & (AL_ETH_DEFAULT_RX_DESCS - 1))
+
+/* flow control configuration */
+#define AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH	0x160
+#define AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW		0x90
+#define AL_ETH_FLOW_CTRL_QUANTA			0xffff
+#define AL_ETH_FLOW_CTRL_QUANTA_TH		0x8000
+
+#define AL_ETH_FLOW_CTRL_AUTONEG	BIT(0)
+#define AL_ETH_FLOW_CTRL_RX_PAUSE	BIT(1)
+#define AL_ETH_FLOW_CTRL_TX_PAUSE	BIT(2)
+
+/* link configuration for 1G port */
+struct al_eth_link_config {
+	int old_link;
+	/* Describes what we actually have. */
+	int active_duplex;
+	int active_speed;
+
+	/* current flow control status */
+	uint8_t flow_ctrl_active;
+	/* supported configuration (can be changed from ethtool) */
+	uint8_t flow_ctrl_supported;
+
+	/* the following are not relevant to RGMII */
+	bool force_1000_base_x;
+	bool autoneg;
+};
+
+/* SFP detection event */
+enum al_eth_sfp_detect_evt {
+	/* No change (no connect, disconnect, or new SFP module) */
+	AL_ETH_SFP_DETECT_EVT_NO_CHANGE,
+	/* SFP module connected */
+	AL_ETH_SFP_DETECT_EVT_CONNECTED,
+	/* SFP module disconnected */
+	AL_ETH_SFP_DETECT_EVT_DISCONNECTED,
+	/* SFP module replaced */
+	AL_ETH_SFP_DETECT_EVT_CHANGED,
+};
+
+/* SFP detection status */
+struct al_eth_sfp_detect_stat {
+	/* Status is valid (i.e. rest of fields are valid) */
+	bool valid;
+	bool connected;
+	uint8_t sfp_10g;
+	uint8_t sfp_1g;
+	uint8_t sfp_cable_tech;
+
+	bool lt_en;
+	bool an_en;
+	enum al_eth_mac_mode mac_mode;
+};
+
+/* Retimer parameters */
+struct al_eth_retimer_params {
+	al_bool exist;
+	uint8_t bus_id;
+	uint8_t i2c_addr;
+	enum al_eth_retimer_channel channel;
+};
+
+/* board specific private data structure */
+struct al_eth_adapter {
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	enum board_t board_type;
+	u16 dev_id;
+	u8 rev_id;
+
+	/* Some features need tri-state capability,
+	 * thus the additional *_CAPABLE flags.
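+	 * A *_CAPABLE bit records what probe discovered the hardware can
+	 * do; the matching *_ENABLED bit records what was actually brought
+	 * up, so the two can legitimately differ at runtime.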
+ */ + u32 flags; +#define AL_ETH_FLAG_MSIX_CAPABLE (u32)(1 << 1) +#define AL_ETH_FLAG_MSIX_ENABLED (u32)(1 << 2) +#define AL_ETH_FLAG_IN_NETPOLL (u32)(1 << 3) +#define AL_ETH_FLAG_MQ_CAPABLE (u32)(1 << 4) +#define AL_ETH_FLAG_SRIOV_CAPABLE (u32)(1 << 5) +#define AL_ETH_FLAG_SRIOV_ENABLED (u32)(1 << 6) + + struct al_hal_eth_adapter hal_adapter; + + /* + * rx packets that shorter that this len will be copied to the skb + * header + */ + unsigned int small_copy_len; + + /* Maximum size for rx buffer */ + unsigned int max_rx_buff_alloc_size; + + /* Tx fast path data */ + int num_tx_queues; + + /* Rx fast path data */ + int num_rx_queues; + + /* TX */ + struct al_eth_ring tx_ring[AL_ETH_NUM_QUEUES] ____cacheline_aligned_in_smp; + + /* RX */ + struct al_eth_ring rx_ring[AL_ETH_NUM_QUEUES]; + +#define AL_ETH_RXQ_NAPI_IDX(adapter, q) (q) +#define AL_ETH_TXQ_NAPI_IDX(adapter, q) ((adapter)->num_rx_queues + (q)) + struct al_eth_napi al_napi[2 * AL_ETH_NUM_QUEUES]; + + enum al_iofic_mode int_mode; + +#define AL_ETH_MGMT_IRQ_IDX 0 +#define AL_ETH_RXQ_IRQ_IDX(adapter, q) (1 + (q)) +#define AL_ETH_TXQ_IRQ_IDX(adapter, q) (1 + (adapter)->num_rx_queues + (q)) + struct al_eth_irq irq_tbl[AL_ETH_MAX_MSIX_VEC]; + struct msix_entry *msix_entries; + int msix_vecs; + int irq_vecs; + + unsigned int tx_usecs, rx_usecs; /* interrupt coalescing */ + + unsigned int tx_ring_count; + unsigned int tx_descs_count; + unsigned int rx_ring_count; + unsigned int rx_descs_count; + + /* RSS*/ + uint32_t toeplitz_hash_key[AL_ETH_RX_HASH_KEY_NUM]; +#define AL_ETH_RX_RSS_TABLE_SIZE AL_ETH_RX_THASH_TABLE_SIZE + uint8_t rss_ind_tbl[AL_ETH_RX_RSS_TABLE_SIZE]; + + uint32_t msg_enable; + struct al_eth_mac_stats mac_stats; + struct al_eth_ec_stats ec_stats; + + enum al_eth_mac_mode mac_mode; + bool mac_mode_set; /* Relevant only when 'auto_speed' is set */ + u8 mac_addr[ETH_ALEN]; + /* mdio and phy*/ + bool phy_exist; + struct mii_bus *mdio_bus; + struct phy_device *phydev; + uint8_t phy_addr; + struct al_eth_link_config link_config; + + /* HAL layer data */ + int id_number; + char name[AL_ETH_NAME_MAX_LEN]; + void __iomem *internal_pcie_base; /* use for ALPINE_NIC devices */ + void __iomem *udma_base; + void __iomem *ec_base; + void __iomem *mac_base; + + struct al_eth_flow_control_params flow_ctrl_params; + + struct al_eth_adapter_params eth_hal_params; + + struct delayed_work link_status_task; + uint32_t link_poll_interval; /* task interval in mSec */ + + bool serdes_init; + struct al_serdes_obj serdes_obj; + uint8_t serdes_grp; + uint8_t serdes_lane; + + bool an_en; /* run kr auto-negotiation */ + bool lt_en; /* run kr link-training */ + + al_bool sfp_detection_needed; /**< true if need to run sfp detection */ + bool auto_speed; /**< true if allowed to change SerDes speed configuration */ + uint8_t i2c_adapter_id; /**< identifier for the i2c adapter to use to access SFP+ module */ + enum al_eth_ref_clk_freq ref_clk_freq; /**< reference clock frequency */ + unsigned int mdio_freq; /**< MDIO frequency [Khz] */ + enum al_eth_board_ext_phy_if phy_if; + + bool up; + + bool last_link; + bool last_establish_failed; + struct al_eth_lm_context lm_context; + bool use_lm; + + bool dont_override_serdes; /**< avoid overriding serdes parameters + to preset static values */ + spinlock_t serdes_config_lock; + + uint32_t wol; + + struct al_eth_retimer_params retimer; + + bool phy_fixup_needed; + + struct mii_bus *sw_mdio_bus; +}; + +#endif /* !(AL_ETH_H) */ diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_eth_sysfs.c 
b/target/linux/alpine/files/drivers/net/ethernet/al/al_eth_sysfs.c
new file mode 100644
index 00000000000000..3d4717467eb357
--- /dev/null
+++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_eth_sysfs.c
@@ -0,0 +1,564 @@
+/* al_eth_sysfs.c: AnnapurnaLabs Unified 1GbE and 10GbE ethernet driver.
+ *
+ * Copyright (c) 2013 AnnapurnaLabs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/stat.h>
+
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include "al_eth.h"
+
+#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
+
+static int al_eth_validate_small_copy_len(struct al_eth_adapter *adapter,
+					  unsigned long len)
+{
+	if (len > adapter->netdev->mtu)
+		return -EINVAL;
+
+	return 0;
+}
+
+static ssize_t al_eth_store_small_copy_len(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t len)
+{
+	struct al_eth_adapter *adapter = dev_get_drvdata(dev);
+	unsigned long small_copy_len;
+	int err;
+
+	err = kstrtoul(buf, 10, &small_copy_len);
+	if (err < 0)
+		return err;
+
+	err = al_eth_validate_small_copy_len(adapter, small_copy_len);
+	if (err)
+		return err;
+
+	rtnl_lock();
+	adapter->small_copy_len = small_copy_len;
+	rtnl_unlock();
+
+	return len;
+}
+
+static ssize_t al_eth_show_small_copy_len(struct device *dev,
+					  struct device_attribute *attr, char *buf)
+{
+	struct al_eth_adapter *adapter = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", adapter->small_copy_len);
+}
+
+static struct device_attribute dev_attr_small_copy_len = {
+	.attr = {.name = "small_copy_len", .mode = (S_IRUGO | S_IWUSR)},
+	.show = al_eth_show_small_copy_len,
+	.store = al_eth_store_small_copy_len,
+};
+
+static ssize_t al_eth_store_link_poll_interval(struct device *dev,
+					       struct device_attribute *attr,
+					       const char *buf, size_t len)
+{
+	struct al_eth_adapter *adapter = dev_get_drvdata(dev);
+	unsigned long link_poll_interval;
+	int err;
+
+	err = kstrtoul(buf, 10, &link_poll_interval);
+	if (err < 0)
+		return err;
+
+	adapter->link_poll_interval = link_poll_interval;
+
+	return len;
+}
+
+static ssize_t al_eth_show_link_poll_interval(struct device *dev,
+					      struct device_attribute *attr, char *buf)
+{
+	struct al_eth_adapter *adapter = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", adapter->link_poll_interval);
+}
+
+static struct device_attribute dev_attr_link_poll_interval = {
+	.attr = {.name = "link_poll_interval", .mode = (S_IRUGO | S_IWUSR)},
+	.show = al_eth_show_link_poll_interval,
+	.store = al_eth_store_link_poll_interval,
+};
+
+static ssize_t al_eth_store_link_management_debug(struct device *dev,
+						  struct device_attribute *attr,
+						  const char *buf, size_t len)
+{
+	struct al_eth_adapter *adapter = dev_get_drvdata(dev);
+	unsigned long link_management_debug;
+	int err;
+
+	err = kstrtoul(buf, 10, &link_management_debug);
+	if (err < 0)
+		return err;
+
+	al_eth_lm_debug_mode_set(&adapter->lm_context,
+				 (link_management_debug) ?
true : false); + + return len; +} + +static struct device_attribute dev_attr_link_management_debug = { + .attr = {.name = "link_management_debug", .mode = S_IWUSR}, + .show = NULL, + .store = al_eth_store_link_management_debug, +}; + +static ssize_t al_eth_store_link_training_enable(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct al_eth_adapter *adapter = dev_get_drvdata(dev); + unsigned long link_training_enable; + int err; + + err = kstrtoul(buf, 10, &link_training_enable); + if (err < 0) + return err; + + adapter->lt_en = (link_training_enable == 0) ? AL_FALSE : AL_TRUE; + + if (adapter->up) + dev_warn(dev, + "%s this action will take place in the next activation (up)\n", + __func__); + + return len; +} + +static ssize_t al_eth_show_link_training_enable(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct al_eth_adapter *adapter = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", adapter->lt_en); +} + +static struct device_attribute dev_attr_link_training_enable = { + .attr = {.name = "link_training_enable", .mode = (S_IRUGO | S_IWUSR)}, + .show = al_eth_show_link_training_enable, + .store = al_eth_store_link_training_enable, +}; + +static ssize_t al_eth_store_force_1000_base_x(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct al_eth_adapter *adapter = dev_get_drvdata(dev); + unsigned long force_1000_base_x; + int err; + + err = kstrtoul(buf, 10, &force_1000_base_x); + if (err < 0) + return err; + + adapter->link_config.force_1000_base_x = + (force_1000_base_x == 0) ? AL_FALSE : AL_TRUE; + + if (adapter->up) + dev_warn(dev, + "%s this action will take place in the next activation (up)\n", + __func__); + + return len; +} + +static ssize_t al_eth_show_force_1000_base_x(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct al_eth_adapter *adapter = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", adapter->link_config.force_1000_base_x); +} + +static struct device_attribute dev_attr_force_1000_base_x = { + .attr = {.name = "force_1000_base_x", .mode = (S_IRUGO | S_IWUSR)}, + .show = al_eth_show_force_1000_base_x, + .store = al_eth_store_force_1000_base_x, +}; + +static ssize_t al_eth_store_serdes_tx_param(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct al_eth_adapter *adapter = dev_get_drvdata(dev); + struct dev_ext_attribute *ea = to_ext_attr(attr); + uintptr_t offset = (uintptr_t)ea->var; + struct al_serdes_adv_tx_params tx_params; + unsigned long param; + int err; + + err = kstrtoul(buf, 16, ¶m); + if (err < 0) + return err; + + spin_lock(&adapter->serdes_config_lock); + + al_eth_lm_static_parameters_get(&adapter->lm_context, + &tx_params, + NULL); + + *(((uint8_t *)&tx_params) + offset) = param; + + spin_unlock(&adapter->serdes_config_lock); + + return len; +} + +static ssize_t al_eth_show_serdes_tx_param(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct al_eth_adapter *adapter = dev_get_drvdata(dev); + struct dev_ext_attribute *ea = to_ext_attr(attr); + uintptr_t offset = (uintptr_t)ea->var; + struct al_serdes_adv_tx_params tx_params; + uint8_t val; + + al_eth_lm_static_parameters_get(&adapter->lm_context, + &tx_params, + NULL); + + val = *(((uint8_t *)&tx_params) + offset); + + return sprintf(buf, "%d\n", val); +} + +static ssize_t al_eth_store_serdes_rx_param(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct 
al_eth_adapter *adapter = dev_get_drvdata(dev);
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	uintptr_t offset = (uintptr_t)ea->var;
+	struct al_serdes_adv_rx_params rx_params;
+	unsigned long param;
+	int err;
+
+	err = kstrtoul(buf, 16, &param);
+	if (err < 0)
+		return err;
+
+	spin_lock(&adapter->serdes_config_lock);
+
+	al_eth_lm_static_parameters_get(&adapter->lm_context,
+					NULL,
+					&rx_params);
+
+	*(((uint8_t *)&rx_params) + offset) = param;
+
+	spin_unlock(&adapter->serdes_config_lock);
+
+	return len;
+}
+
+static ssize_t al_eth_show_serdes_rx_param(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct al_eth_adapter *adapter = dev_get_drvdata(dev);
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	uintptr_t offset = (uintptr_t)ea->var;
+	struct al_serdes_adv_rx_params rx_params;
+	uint8_t val;
+
+	al_eth_lm_static_parameters_get(&adapter->lm_context,
+					NULL,
+					&rx_params);
+
+	val = *(((uint8_t *)&rx_params) + offset);
+
+	return sprintf(buf, "%d\n", val);
+}
+
+#define AL_ETH_SERDES_TX_PARAMS_ATTR(_name) { \
+	__ATTR(serdes_tx_##_name, 0660, \
+	       al_eth_show_serdes_tx_param, al_eth_store_serdes_tx_param), \
+	(void*)offsetof(struct al_serdes_adv_tx_params, _name) }
+
+#define AL_ETH_SERDES_RX_PARAMS_ATTR(_name) { \
+	__ATTR(serdes_rx_##_name, 0660, \
+	       al_eth_show_serdes_rx_param, al_eth_store_serdes_rx_param), \
+	(void*)offsetof(struct al_serdes_adv_rx_params, _name) }
+
+struct dev_ext_attribute dev_attr_serdes_params[] = {
+	AL_ETH_SERDES_TX_PARAMS_ATTR(amp),
+	AL_ETH_SERDES_TX_PARAMS_ATTR(total_driver_units),
+	AL_ETH_SERDES_TX_PARAMS_ATTR(c_plus_1),
+	AL_ETH_SERDES_TX_PARAMS_ATTR(c_plus_2),
+	AL_ETH_SERDES_TX_PARAMS_ATTR(c_minus_1),
+	AL_ETH_SERDES_TX_PARAMS_ATTR(slew_rate),
+	AL_ETH_SERDES_RX_PARAMS_ATTR(dcgain),
+	AL_ETH_SERDES_RX_PARAMS_ATTR(dfe_3db_freq),
+	AL_ETH_SERDES_RX_PARAMS_ATTR(dfe_gain),
+	AL_ETH_SERDES_RX_PARAMS_ATTR(dfe_first_tap_ctrl),
+	AL_ETH_SERDES_RX_PARAMS_ATTR(dfe_secound_tap_ctrl),
+	AL_ETH_SERDES_RX_PARAMS_ATTR(dfe_third_tap_ctrl),
+	AL_ETH_SERDES_RX_PARAMS_ATTR(dfe_fourth_tap_ctrl),
+	AL_ETH_SERDES_RX_PARAMS_ATTR(low_freq_agc_gain),
+	AL_ETH_SERDES_RX_PARAMS_ATTR(precal_code_sel),
+	AL_ETH_SERDES_RX_PARAMS_ATTR(high_freq_agc_boost)
+};
+
+static ssize_t al_eth_store_max_rx_buff_alloc_size(struct device *dev,
+						   struct device_attribute *attr,
+						   const char *buf, size_t len)
+{
+	struct al_eth_adapter *adapter = dev_get_drvdata(dev);
+	unsigned long max_rx_buff_alloc_size;
+	int err;
+
+	err = kstrtoul(buf, 10, &max_rx_buff_alloc_size);
+	if (err < 0)
+		return err;
+
+	adapter->max_rx_buff_alloc_size = max_rx_buff_alloc_size;
+
+	return len;
+}
+
+static ssize_t al_eth_show_max_rx_buff_alloc_size(struct device *dev,
+						  struct device_attribute *attr, char *buf)
+{
+	struct al_eth_adapter *adapter = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", adapter->max_rx_buff_alloc_size);
+}
+
+static struct device_attribute dev_attr_max_rx_buff_alloc_size = {
+	.attr = {.name = "max_rx_buff_alloc_size", .mode = (S_IRUGO | S_IWUSR)},
+	.show = al_eth_show_max_rx_buff_alloc_size,
+	.store = al_eth_store_max_rx_buff_alloc_size,
+};
+
+
+
+#define UDMA_DUMP_PREP_ATTR(_name, _type) {\
+	__ATTR(udma_dump_##_name, 0660, rd_udma_dump, wr_udma_dump),\
+	(void*)_type }
+
+enum udma_dump_type {
+	UDMA_DUMP_M2S_REGS,
+	UDMA_DUMP_M2S_Q_STRUCT,
+	UDMA_DUMP_M2S_Q_POINTERS,
+	UDMA_DUMP_S2M_REGS,
+	UDMA_DUMP_S2M_Q_STRUCT,
+	UDMA_DUMP_S2M_Q_POINTERS
+};
+
+static ssize_t rd_udma_dump(
+	struct device *dev,
+	struct device_attribute *attr,
+	char
*buf); + +static ssize_t wr_udma_dump( + struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count); + + +/* Device attrs - udma debug */ +static struct dev_ext_attribute dev_attr_udma_debug[] = { + UDMA_DUMP_PREP_ATTR(m2s_regs, UDMA_DUMP_M2S_REGS), + UDMA_DUMP_PREP_ATTR(m2s_q_struct, UDMA_DUMP_M2S_Q_STRUCT), + UDMA_DUMP_PREP_ATTR(m2s_q_pointers, UDMA_DUMP_M2S_Q_POINTERS), + UDMA_DUMP_PREP_ATTR(s2m_regs, UDMA_DUMP_S2M_REGS), + UDMA_DUMP_PREP_ATTR(s2m_q_struct, UDMA_DUMP_S2M_Q_STRUCT), + UDMA_DUMP_PREP_ATTR(s2m_q_pointers, UDMA_DUMP_S2M_Q_POINTERS) +}; + +/****************************************************************************** + *****************************************************************************/ +int al_eth_sysfs_init( + struct device *dev) +{ + int status = 0; + + int i; + + if (device_create_file(dev, &dev_attr_small_copy_len)) + dev_info(dev, "failed to create small_copy_len sysfs entry"); + + if (device_create_file(dev, &dev_attr_link_poll_interval)) + dev_info(dev, "failed to create link_poll_interval sysfs entry"); + + if (device_create_file(dev, &dev_attr_link_management_debug)) + dev_info(dev, "failed to create link_management_debug sysfs entry"); + + if (device_create_file(dev, &dev_attr_max_rx_buff_alloc_size)) + dev_info(dev, "failed to create max_rx_buff_alloc_size sysfs entry"); + + if (device_create_file(dev, &dev_attr_link_training_enable)) + dev_info(dev, "failed to create link_training_enable sysfs entry"); + + if (device_create_file(dev, &dev_attr_force_1000_base_x)) + dev_info(dev, "failed to create force_1000_base_x sysfs entry"); + + for (i = 0 ; i < ARRAY_SIZE(dev_attr_serdes_params) ; i++) { + status = sysfs_create_file( + &dev->kobj, + &dev_attr_serdes_params[i].attr.attr); + if (status) { + dev_err(dev, "%s: sysfs_create_file(serdes_params %d) failed\n", + __func__, i); + goto done; + } + } + + for (i = 0; i < ARRAY_SIZE(dev_attr_udma_debug); i++ ) { + status = sysfs_create_file( + &dev->kobj, + &dev_attr_udma_debug[i].attr.attr); + if (status) { + dev_err( + dev, + "%s: sysfs_create_file(stats_udma %d) failed\n", + __func__, + i); + goto done; + } + } +done: + + return status; +} + +/****************************************************************************** + *****************************************************************************/ +void al_eth_sysfs_terminate( + struct device *dev) +{ + int i; + + device_remove_file(dev, &dev_attr_small_copy_len); + device_remove_file(dev, &dev_attr_link_poll_interval); + device_remove_file(dev, &dev_attr_link_management_debug); + device_remove_file(dev, &dev_attr_max_rx_buff_alloc_size); + device_remove_file(dev, &dev_attr_link_training_enable); + device_remove_file(dev, &dev_attr_force_1000_base_x); + + for (i = 0; i < ARRAY_SIZE(dev_attr_udma_debug); i++) + sysfs_remove_file( + &dev->kobj, + &dev_attr_udma_debug[i].attr.attr); + + for (i = 0; i < ARRAY_SIZE(dev_attr_serdes_params); i++ ) { + sysfs_remove_file( + &dev->kobj, + &dev_attr_serdes_params[i].attr.attr); + } + +} + +/****************************************************************************** + *****************************************************************************/ +static ssize_t rd_udma_dump( + struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + enum udma_dump_type dump_type = (enum udma_dump_type)ea->var; + ssize_t rc = 0; + + switch (dump_type) { + case UDMA_DUMP_M2S_REGS: + case UDMA_DUMP_S2M_REGS: + rc = sprintf( + buf, + "Write 
mask to dump corresponding udma regs\n");
+		break;
+	case UDMA_DUMP_M2S_Q_STRUCT:
+	case UDMA_DUMP_S2M_Q_STRUCT:
+		rc = sprintf(
+			buf,
+			"Write q num to dump corresponding q struct\n");
+		break;
+	case UDMA_DUMP_M2S_Q_POINTERS:
+	case UDMA_DUMP_S2M_Q_POINTERS:
+		rc = sprintf(
+			buf,
+			"Write q num (in hex) and set bit 4 (0x10) for the"
+			" submission ring, for ex:\n"
+			"0 for completion ring of q 0\n"
+			"10 for submission ring of q 0\n");
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+struct al_eth_adapter;
+extern struct al_udma *al_eth_udma_get(struct al_eth_adapter *adapter, int tx);
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t wr_udma_dump(
+	struct device *dev,
+	struct device_attribute *attr,
+	const char *buf,
+	size_t count)
+{
+	int err;
+	int q_id;
+	unsigned long val;
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	enum udma_dump_type dump_type = (enum udma_dump_type)ea->var;
+	enum al_udma_ring_type ring_type = AL_RING_COMPLETION;
+	struct al_eth_adapter *adapter = dev_get_drvdata(dev);
+
+	err = kstrtoul(buf, 16, &val);
+	if (err < 0)
+		return err;
+
+	switch (dump_type) {
+	case UDMA_DUMP_M2S_REGS:
+		al_udma_regs_print(al_eth_udma_get(adapter, 1), val);
+		break;
+	case UDMA_DUMP_S2M_REGS:
+		al_udma_regs_print(al_eth_udma_get(adapter, 0), val);
+		break;
+	case UDMA_DUMP_M2S_Q_STRUCT:
+		al_udma_q_struct_print(al_eth_udma_get(adapter, 1), val);
+		break;
+	case UDMA_DUMP_S2M_Q_STRUCT:
+		al_udma_q_struct_print(al_eth_udma_get(adapter, 0), val);
+		break;
+	case UDMA_DUMP_M2S_Q_POINTERS:
+		if (val & 0x10)
+			ring_type = AL_RING_SUBMISSION;
+		q_id = val & 0xf;
+		al_udma_ring_print(al_eth_udma_get(adapter, 1), q_id, ring_type);
+		break;
+	case UDMA_DUMP_S2M_Q_POINTERS:
+		if (val & 0x10)
+			ring_type = AL_RING_SUBMISSION;
+		q_id = val & 0xf;
+		al_udma_ring_print(al_eth_udma_get(adapter, 0), q_id, ring_type);
+		break;
+	default:
+		break;
+	}
+
+	return count;
+}
+
diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_eth_sysfs.h b/target/linux/alpine/files/drivers/net/ethernet/al/al_eth_sysfs.h new file mode 100644 index 00000000000000..d41f321097c205 --- /dev/null +++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_eth_sysfs.h @@ -0,0 +1,27 @@
+/*
+ * al_eth_sysfs.h: AnnapurnaLabs Unified 1GbE and 10GbE ethernet driver.
+ *
+ * Copyright (C) 2014 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef __AL_ETH_SYSFS_H__ +#define __AL_ETH_SYSFS_H__ + +int al_eth_sysfs_init(struct device *dev); + +void al_eth_sysfs_terminate(struct device *dev); + +#endif /* __AL_ETH_SYSFS_H__ */ + diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth.h b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth.h new file mode 100644 index 00000000000000..66505c902ed787 --- /dev/null +++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth.h @@ -0,0 +1,2299 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*******************************************************************************/ + +/** + * @defgroup group_eth_api API + * Ethernet Controller HAL driver API + * @ingroup group_eth + * @{ + * @file al_hal_eth.h + * + * @brief Header file for Unified GbE and 10GbE Ethernet Controllers This is a + * common header file that covers both Standard and Advanced Controller + * + * + */ + +#ifndef __AL_HAL_ETH_H__ +#define __AL_HAL_ETH_H__ + +#include +#include +#include "al_hal_eth_macsec.h" +#ifdef AL_ETH_SUPPORT_DDP +#include "al_hal_eth_ddp.h" +#endif + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +#ifndef AL_ETH_PKT_MAX_BUFS +#define AL_ETH_PKT_MAX_BUFS 19 +#endif + +/* uncomment the following line to enable support of macsec */ +/*#define AL_ETH_SUPPORT_MACSEC */ + +#define AL_ETH_UDMA_TX_QUEUES 4 +#define AL_ETH_UDMA_RX_QUEUES 4 + +/* PCI Adapter Device/Revision ID */ +#define AL_ETH_DEV_ID_STANDARD 0x0001 +#define AL_ETH_DEV_ID_ADVANCED 0x0002 +#define AL_ETH_REV_ID_0 0 /* Alpine Rev 0 */ +#define AL_ETH_REV_ID_1 1 /* Alpine Rev 1 */ +#define AL_ETH_REV_ID_2 2 /* PeakRock basic */ +#define AL_ETH_REV_ID_3 3 /* PeakRock advanced */ + +/* PCI BARs */ +#define AL_ETH_UDMA_BAR 0 +#define AL_ETH_EC_BAR 4 +#define AL_ETH_MAC_BAR 2 + +#define AL_ETH_MAX_FRAME_LEN 10000 +#define AL_ETH_MIN_FRAME_LEN 60 + +#define AL_ETH_TSO_MSS_MAX_IDX 8 +#define AL_ETH_TSO_MSS_MIN_VAL 1 +/*TODO: update with correct value*/ +#define AL_ETH_TSO_MSS_MAX_VAL (AL_ETH_MAX_FRAME_LEN - 200) + +enum AL_ETH_PROTO_ID { + AL_ETH_PROTO_ID_UNKNOWN = 0, + AL_ETH_PROTO_ID_IPv4 = 8, + AL_ETH_PROTO_ID_IPv6 = 11, + AL_ETH_PROTO_ID_TCP = 12, + AL_ETH_PROTO_ID_UDP = 13, + AL_ETH_PROTO_ID_FCOE = 21, + AL_ETH_PROTO_ID_GRH = 22, /** RoCE l3 header */ + AL_ETH_PROTO_ID_BTH = 23, /** RoCE l4 header */ + AL_ETH_PROTO_ID_ANY = 32, /**< for sw usage only */ +}; +#define AL_ETH_PROTOCOLS_NUM (AL_ETH_PROTO_ID_ANY) + +enum AL_ETH_TX_TUNNEL_MODE { + AL_ETH_NO_TUNNELING = 0, + AL_ETH_TUNNEL_NO_UDP = 1, /* NVGRE / IP over IP */ + AL_ETH_TUNNEL_WITH_UDP = 3, /* VXLAN */ +}; + +#define AL_ETH_RX_THASH_TABLE_SIZE (1 << 8) +#define AL_ETH_RX_FSM_TABLE_SIZE (1 << 7) +#define AL_ETH_RX_CTRL_TABLE_SIZE (1 << 11) +#define AL_ETH_RX_HASH_KEY_NUM 10 +#define AL_ETH_FWD_MAC_NUM 32 +#define AL_ETH_FWD_MAC_HASH_NUM 256 +#define AL_ETH_FWD_PBITS_TABLE_NUM (1 << 3) +#define AL_ETH_FWD_PRIO_TABLE_NUM (1 << 3) +#define AL_ETH_FWD_VID_TABLE_NUM (1 << 12) +#define AL_ETH_FWD_DSCP_TABLE_NUM (1 << 8) +#define AL_ETH_FWD_TC_TABLE_NUM (1 << 8) + +/** MAC media mode */ +enum al_eth_mac_mode { + AL_ETH_MAC_MODE_RGMII, + AL_ETH_MAC_MODE_SGMII, + AL_ETH_MAC_MODE_SGMII_2_5G, + AL_ETH_MAC_MODE_10GbE_Serial, /**< Applies to XFI and KR modes */ + AL_ETH_MAC_MODE_10G_SGMII, /**< SGMII using the 10G MAC, don't use*/ + AL_ETH_MAC_MODE_XLG_LL_40G, /**< applies to 40G mode using the 40G low latency (LL) MAC */ + AL_ETH_MAC_MODE_KR_LL_25G, /**< applies to 25G mode using the 10/25G low latency (LL) MAC */ + AL_ETH_MAC_MODE_XLG_LL_50G /**< applies to 50G mode using the 40/50G low latency (LL) MAC */ +}; + +struct al_eth_capabilities { + al_bool speed_10_HD; + al_bool speed_10_FD; + al_bool speed_100_HD; + al_bool speed_100_FD; + al_bool speed_1000_HD; + al_bool speed_1000_FD; + al_bool speed_10000_HD; + al_bool speed_10000_FD; + al_bool pfc; /**< priority flow control */ + al_bool eee; /**< Energy Efficient Ethernet */ +}; + +/** interface type used for MDIO */ +enum al_eth_mdio_if { + AL_ETH_MDIO_IF_1G_MAC = 0, + AL_ETH_MDIO_IF_10G_MAC = 1 +}; 
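+
+/*
+ * Illustrative sketch (not part of the HAL API): a driver will typically
+ * derive the MDIO interface from the MAC mode it is about to configure;
+ * mac_mode below is a local variable assumed for the example:
+ *
+ *	enum al_eth_mdio_if mdio_if =
+ *		(mac_mode == AL_ETH_MAC_MODE_RGMII ||
+ *		 mac_mode == AL_ETH_MAC_MODE_SGMII) ?
+ *		AL_ETH_MDIO_IF_1G_MAC : AL_ETH_MDIO_IF_10G_MAC;
+ */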
+ +/** MDIO protocol type */ +enum al_eth_mdio_type { + AL_ETH_MDIO_TYPE_CLAUSE_22 = 0, + AL_ETH_MDIO_TYPE_CLAUSE_45 = 1 +}; + +/** flow control mode */ +enum al_eth_flow_control_type { + AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE, + AL_ETH_FLOW_CONTROL_TYPE_PFC +}; + +/** Tx to Rx switching decision type */ +enum al_eth_tx_switch_dec_type { + AL_ETH_TX_SWITCH_TYPE_MAC = 0, + AL_ETH_TX_SWITCH_TYPE_VLAN_TABLE = 1, + AL_ETH_TX_SWITCH_TYPE_VLAN_TABLE_AND_MAC = 2, + AL_ETH_TX_SWITCH_TYPE_BITMAP = 3 +}; + +/** Tx to Rx VLAN ID selection type */ +enum al_eth_tx_switch_vid_sel_type { + AL_ETH_TX_SWITCH_VID_SEL_TYPE_VLAN1 = 0, + AL_ETH_TX_SWITCH_VID_SEL_TYPE_VLAN2 = 1, + AL_ETH_TX_SWITCH_VID_SEL_TYPE_NEW_VLAN1 = 2, + AL_ETH_TX_SWITCH_VID_SEL_TYPE_NEW_VLAN2 = 3, + AL_ETH_TX_SWITCH_VID_SEL_TYPE_DEFAULT_VLAN1 = 4, + AL_ETH_TX_SWITCH_VID_SEL_TYPE_FINAL_VLAN1 = 5 +}; + +/** Rx descriptor configurations */ +/* Note: when selecting rx descriptor field to inner packet, then that field +* will be set according to inner packet when packet is tunneled, for non-tunneled +* packets, the field will be set according to the packets header */ + +/** selection of the LRO_context_value result in the Metadata */ +enum al_eth_rx_desc_lro_context_val_res { + AL_ETH_LRO_CONTEXT_VALUE = 0, /**< LRO_context_value */ + AL_ETH_L4_OFFSET = 1, /**< L4_offset */ +}; + +/** selection of the L4 offset in the Metadata */ +enum al_eth_rx_desc_l4_offset_sel { + AL_ETH_L4_OFFSET_OUTER = 0, /**< set L4 offset of the outer packet */ + AL_ETH_L4_OFFSET_INNER = 1, /**< set L4 offset of the inner packet */ +}; + +/** selection of the L4 checksum result in the Metadata */ +enum al_eth_rx_desc_l4_chk_res_sel { + AL_ETH_L4_INNER_CHK = 0, /**< L4 checksum */ + AL_ETH_L4_INNER_OUTER_CHK = 1, /**< Logic AND between outer and inner + L4 checksum result */ +}; + +/** selection of the L3 checksum result in the Metadata */ +enum al_eth_rx_desc_l3_chk_res_sel { + AL_ETH_L3_CHK_TYPE_0 = 0, /**< L3 checksum */ + AL_ETH_L3_CHK_TYPE_1 = 1, /**< L3 checksum or RoCE/FCoE CRC, + based on outer header */ + AL_ETH_L3_CHK_TYPE_2 = 2, /**< If tunnel exist = 0, + L3 checksum or RoCE/FCoE CRC, + based on outer header. + Else, + logic AND between outer L3 checksum + (Ipv4) and inner CRC (RoCE or FcoE) */ + AL_ETH_L3_CHK_TYPE_3 = 3, /**< combination of the L3 checksum result and + CRC result,based on the checksum and + RoCE/FCoE CRC input selections. 
*/
+};
+
+/** selection of the L3 protocol index in the Metadata */
+enum al_eth_rx_desc_l3_proto_idx_sel {
+	AL_ETH_L3_PROTO_IDX_OUTER = 0,	/**< set L3 proto index of the outer packet */
+	AL_ETH_L3_PROTO_IDX_INNER = 1,	/**< set L3 proto index of the inner packet */
+};
+
+/** selection of the L3 offset in the Metadata */
+enum al_eth_rx_desc_l3_offset_sel {
+	AL_ETH_L3_OFFSET_OUTER = 0,	/**< set L3 offset of the outer packet */
+	AL_ETH_L3_OFFSET_INNER = 1,	/**< set L3 offset of the inner packet */
+};
+
+
+/** selection of the L4 protocol index in the Metadata */
+enum al_eth_rx_desc_l4_proto_idx_sel {
+	AL_ETH_L4_PROTO_IDX_OUTER = 0,	/**< set L4 proto index of the outer packet */
+	AL_ETH_L4_PROTO_IDX_INNER = 1,	/**< set L4 proto index of the inner packet */
+};
+
+/** selection of the frag indication in the Metadata */
+enum al_eth_rx_desc_frag_sel {
+	AL_ETH_FRAG_OUTER = 0,	/**< set frag of the outer packet */
+	AL_ETH_FRAG_INNER = 1,	/**< set frag of the inner packet */
+};
+
+/** Ethernet Rx completion descriptor */
+typedef struct {
+	uint32_t ctrl_meta;
+	uint32_t len;
+	uint32_t word2;
+	uint32_t word3;
+} al_eth_rx_cdesc;
+
+/** Flow Control parameters */
+struct al_eth_flow_control_params{
+	enum al_eth_flow_control_type type; /**< flow control type */
+	al_bool		obay_enable; /**< stop tx when pause received */
+	al_bool		gen_enable; /**< generate pause frames */
+	uint16_t	rx_fifo_th_high;
+	uint16_t	rx_fifo_th_low;
+	uint16_t	quanta;
+	uint16_t	quanta_th;
+	uint8_t		prio_q_map[4][8]; /**< for each UDMA, defines the mapping between
+					   * PFC priority and queues (as a bit mask).
+					   * the same mapping is used for obay and generation.
+					   * for example:
+					   * if prio_q_map[1][7] = 0xC, then TX queues 2
+					   * and 3 of UDMA 1 will be stopped when pause
+					   * received with priority 7, also, when RX queues
+					   * 2 and 3 of UDMA 1 become almost full, then
+					   * pause frame with priority 7 will be sent.
+					   *
+					   * note:
+					   * 1) if a specific queue is not used, the caller
+					   * must set its prio_q_map bits to 0, otherwise
+					   * that queue will make the controller keep
+					   * sending PAUSE packets.
+					   * 2) queues of an unused UDMA must be treated the
+					   * same way.
+					   * 3) when working in LINK PAUSE mode, only entries
+					   * at priority 0 will be considered.
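+					   *
+					   * A minimal link-pause configuration sketch
+					   * (illustrative values; FIFO thresholds omitted):
+					   *
+					   *	struct al_eth_flow_control_params fc = { 0 };
+					   *
+					   *	fc.type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE;
+					   *	fc.obay_enable = AL_TRUE;
+					   *	fc.gen_enable = AL_TRUE;
+					   *	fc.quanta = 0xffff;
+					   *	fc.quanta_th = 0x8000;
+					   *	fc.prio_q_map[0][0] = 0xf;
+					   *	al_eth_flow_control_config(adapter, &fc);
+					   *
+					   * (priority 0 mapped to all 4 queues of UDMA 0)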
+					   */
+};
+
+/* Packet Tx flags */
+#define AL_ETH_TX_FLAGS_TSO		AL_BIT(7)  /**< Enable TCP/UDP segmentation offloading */
+#define AL_ETH_TX_FLAGS_IPV4_L3_CSUM	AL_BIT(13) /**< Enable IPv4 header checksum calculation */
+#define AL_ETH_TX_FLAGS_L4_CSUM		AL_BIT(14) /**< Enable TCP/UDP checksum calculation */
+#define AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM	AL_BIT(17) /**< L4 partial checksum calculation */
+#define AL_ETH_TX_FLAGS_L2_MACSEC_PKT	AL_BIT(16) /**< L2 Packet type 802_3 or 802_3_MACSEC */
+#define AL_ETH_TX_FLAGS_L2_DIS_FCS	AL_BIT(15) /**< Disable CRC calculation */
+#define AL_ETH_TX_FLAGS_TS		AL_BIT(21) /**< Timestamp the packet */
+
+#define AL_ETH_TX_FLAGS_INT		AL_M2S_DESC_INT_EN
+#define AL_ETH_TX_FLAGS_NO_SNOOP	AL_M2S_DESC_NO_SNOOP_H
+
+/** this structure is used for tx packet meta data */
+struct al_eth_meta_data{
+	uint8_t store :1; /**< store the meta into the queues cache */
+	uint8_t words_valid :4; /**< valid bit per word */
+
+	uint8_t vlan1_cfi_sel:2;
+	uint8_t vlan2_vid_sel:2;
+	uint8_t vlan2_cfi_sel:2;
+	uint8_t vlan2_pbits_sel:2;
+	uint8_t vlan2_ether_sel:2;
+
+	uint16_t vlan1_new_vid:12;
+	uint8_t vlan1_new_cfi :1;
+	uint8_t vlan1_new_pbits :3;
+	uint16_t vlan2_new_vid:12;
+	uint8_t vlan2_new_cfi :1;
+	uint8_t vlan2_new_pbits :3;
+
+	uint8_t l3_header_len; /**< in bytes */
+	uint8_t l3_header_offset;
+	uint8_t l4_header_len; /**< in words (32 bits) */
+
+	/* rev 0 specific */
+	uint8_t mss_idx_sel:3; /**< for TSO, select the register that holds the MSS */
+
+	/* rev 1 specific */
+	uint8_t ts_index:4; /**< index of the register where to store the tx timestamp */
+	uint16_t mss_val :14; /**< for TSO, set the mss value */
+	uint8_t outer_l3_offset; /**< for tunneling mode. up to 64 bytes */
+	uint8_t outer_l3_len; /**< for tunneling mode. up to 128 bytes */
+};
+
+/* Packet Rx flags when adding buffer to receive queue */
+
+/**<
+ * VMID to be assigned to the packet descriptors
+ * Requires VMID in descriptor to be enabled for the specific UDMA
+ * queue.
+ */
+#define AL_ETH_RX_FLAGS_VMID_MASK	AL_FIELD_MASK(15, 0)
+#define AL_ETH_RX_FLAGS_NO_SNOOP	AL_M2S_DESC_NO_SNOOP_H
+#define AL_ETH_RX_FLAGS_INT		AL_M2S_DESC_INT_EN
+#define AL_ETH_RX_FLAGS_DUAL_BUF	AL_BIT(31)
+
+/* Packet Rx flags set by HW when receiving packet */
+#define AL_ETH_RX_ERROR			AL_BIT(16) /**< layer 2 errors (FCS, bad len, etc) */
+#define AL_ETH_RX_FLAGS_L4_CSUM_ERR	AL_BIT(14)
+#define AL_ETH_RX_FLAGS_L3_CSUM_ERR	AL_BIT(13)
+
+/** packet structure. used for packet transmission and reception */
+struct al_eth_pkt{
+	uint32_t flags; /**< see flags above, depends on context (tx or rx) */
+	enum AL_ETH_PROTO_ID l3_proto_idx;
+	enum AL_ETH_PROTO_ID l4_proto_idx;
+	uint8_t source_vlan_count:2;
+	uint8_t vlan_mod_add_count:2;
+	uint8_t vlan_mod_del_count:2;
+	uint8_t vlan_mod_v1_ether_sel:2;
+	uint8_t vlan_mod_v1_vid_sel:2;
+	uint8_t vlan_mod_v1_pbits_sel:2;
+
+	/* rev 1 specific */
+	enum AL_ETH_TX_TUNNEL_MODE tunnel_mode;
+	enum AL_ETH_PROTO_ID outer_l3_proto_idx; /**< for tunneling mode */
+
+	/**<
+	 * VMID to be assigned to the packet descriptors
+	 * Requires VMID in descriptor to be enabled for the specific UDMA
+	 * queue.
+	 */
+	uint16_t vmid;
+
+	uint32_t rx_header_len; /**< header buffer length of rx packet, not used */
+	struct al_eth_meta_data *meta; /**< if null, then no meta added */
+#ifdef AL_ETH_RX_DESC_RAW_GET
+	uint32_t rx_desc_raw[4];
+#endif
+	uint16_t rxhash;
+	uint16_t l3_offset;
+
+#ifdef AL_ETH_SUPPORT_DDP
+	struct al_eth_ext_metadata *ext_meta_data;
+#endif
+
+#ifdef AL_ETH_SUPPORT_MACSEC
+	/** MacSec */
+	uint8_t macsec_secure_channel:6; /**< index of point-to-point secure channel */
+	uint8_t macsec_association_number:2; /**< used for hot-swap of SAs (crypto parameters) */
+	uint16_t macsec_secured_pyld_len:14; /**< secured payload length */
+	uint16_t macsec_rx_flags; /**< see flags description in al_hal_eth_macsec.h, MACSEC_RX_FLAGS_* */
+	al_bool macsec_encrypt;
+	al_bool macsec_sign;
+#endif
+	uint8_t num_of_bufs;
+	struct al_buf bufs[AL_ETH_PKT_MAX_BUFS];
+};
+
+struct al_ec_regs;
+
+
+/** Ethernet Adapter private data structure used by this driver */
+struct al_hal_eth_adapter{
+	uint16_t dev_id; /**< device ID, AL_ETH_DEV_ID_STANDARD or AL_ETH_DEV_ID_ADVANCED */
+	/* ... remaining fields elided ... */
+};
+
+/* TX */
+/**
+ * get number of free tx descriptors
+ *
+ * @param adapter pointer to the private structure
+ * @param qid the index of the tx queue
+ *
+ * @return number of free tx descriptors
+ */
+static INLINE uint32_t al_eth_tx_available_get(struct al_hal_eth_adapter *adapter,
+					       uint32_t qid)
+{
+	struct al_udma_q *udma_q;
+
+	al_udma_q_handle_get(&adapter->tx_udma, qid, &udma_q);
+
+	return al_udma_available_get(udma_q);
+}
+
+/**
+ * prepare packet descriptors in tx queue.
+ *
+ * This function prepares the descriptors for the given packet in the tx
+ * submission ring. the caller must call al_eth_tx_dma_action() below
+ * in order to notify the hardware about the new descriptors.
+ *
+ * @param tx_dma_q pointer to UDMA tx queue
+ * @param pkt the packet to transmit
+ *
+ * @return number of descriptors used for this packet, 0 if no free
+ * room in the descriptors ring
+ */
+int al_eth_tx_pkt_prepare(struct al_udma_q *tx_dma_q, struct al_eth_pkt *pkt);
+
+
+/**
+ * Trigger the DMA about previously added tx descriptors.
+ *
+ * @param tx_dma_q pointer to UDMA tx queue
+ * @param tx_descs number of descriptors to notify the DMA about.
+ * the tx_descs can be the sum of descriptor numbers of multiple prepared
+ * packets, this way the caller can use this function to notify the DMA about
+ * multiple packets.
+ */
+void al_eth_tx_dma_action(struct al_udma_q *tx_dma_q, uint32_t tx_descs);
+
+/**
+ * get number of completed tx descriptors, upper layer should derive from
+ * this information which packets were completed.
+ *
+ * @param tx_dma_q pointer to UDMA tx queue
+ *
+ * @return number of completed tx descriptors.
+ */
+int al_eth_comp_tx_get(struct al_udma_q *tx_dma_q);
+
+/**
+ * configure a TSO MSS val
+ *
+ * the TSO MSS vals are preconfigured values for MSS stored in hardware and the
+ * packet could use them when not working in MSS explicit mode.
+ * @param adapter pointer to the private structure
+ * @param idx the mss index
+ * @param mss_val the MSS value
+ *
+ * @return 0 on success. otherwise on failure.
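+ *
+ * Example sketch (1448 is an illustrative IPv4/TCP MSS for a 1500-byte MTU):
+ *
+ *	al_eth_tso_mss_config(adapter, 0, 1448);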
+ */
+int al_eth_tso_mss_config(struct al_hal_eth_adapter *adapter, uint8_t idx, uint32_t mss_val);
+
+/* RX */
+/**
+ * Config the RX descriptor fields
+ *
+ * @param adapter pointer to the private structure
+ * @param lro_sel select LRO context or l4 offset
+ * @param l4_offset_sel select l4 offset source
+ * @param l3_offset_sel select l3 offset source
+ * @param l4_sel select the l4 checksum result
+ * @param l3_sel select the l3 checksum result
+ * @param l3_proto_sel select the l3 protocol index source
+ * @param l4_proto_sel select the l4 protocol index source
+ * @param frag_sel select the frag indication source
+ */
+void al_eth_rx_desc_config(
+	struct al_hal_eth_adapter *adapter,
+	enum al_eth_rx_desc_lro_context_val_res lro_sel,
+	enum al_eth_rx_desc_l4_offset_sel l4_offset_sel,
+	enum al_eth_rx_desc_l3_offset_sel l3_offset_sel,
+	enum al_eth_rx_desc_l4_chk_res_sel l4_sel,
+	enum al_eth_rx_desc_l3_chk_res_sel l3_sel,
+	enum al_eth_rx_desc_l3_proto_idx_sel l3_proto_sel,
+	enum al_eth_rx_desc_l4_proto_idx_sel l4_proto_sel,
+	enum al_eth_rx_desc_frag_sel frag_sel);
+
+/**
+ * Configure RX header split
+ *
+ * @param adapter pointer to the private structure
+ * @param enable header split when AL_TRUE
+ * @param header_len length in bytes of the header split, this value is used
+ * when the CTRL TABLE header split len select is set to
+ * AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_REG, in this case the controller will
+ * store the first header_len bytes into buf2, then the rest (if any) into buf1.
+ * when the CTRL_TABLE header split len select is set to any other value, the
+ * header length is determined according to the parser and this parameter is
+ * not used.
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_rx_header_split_config(struct al_hal_eth_adapter *adapter, al_bool enable, uint32_t header_len);
+
+/**
+ * add buffer to receive queue
+ *
+ * @param rx_dma_q pointer to UDMA rx queue
+ * @param buf pointer to data buffer
+ * @param flags bitwise of AL_ETH_RX_FLAGS
+ * @param header_buf this is not used so far; header_buf should be set to
+ * NULL.
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_rx_buffer_add(struct al_udma_q *rx_dma_q,
+			 struct al_buf *buf, uint32_t flags,
+			 struct al_buf *header_buf);
+
+/**
+ * notify the hw engine about rx descriptors that were added to the receive queue
+ *
+ * @param rx_dma_q pointer to UDMA rx queue
+ * @param descs_num number of rx descriptors
+ */
+void al_eth_rx_buffer_action(struct al_udma_q *rx_dma_q,
+			     uint32_t descs_num);
+
+/**
+ * get packet from RX completion ring
+ *
+ * @param rx_dma_q pointer to UDMA rx queue
+ * @param pkt pointer to a packet data structure, this function fills this
+ * structure with the information about the received packet. the buffer
+ * structures are filled only with the length of the data written into each
+ * buffer; the address fields are not updated, as the upper layer can retrieve
+ * this information by itself because the hardware uses the buffers in the same
+ * order in which they were inserted into the ring of the receive queue.
+ * this structure should be allocated by the caller function.
+ *
+ * @return return number of descriptors or 0 if no completed packet found.
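+ *
+ * A rough poll-loop sketch (consume() is hypothetical; error handling and
+ * buffer replenishing via al_eth_rx_buffer_add()/al_eth_rx_buffer_action()
+ * are elided):
+ *
+ *	struct al_eth_pkt pkt;
+ *	uint32_t descs = al_eth_pkt_rx(rx_dma_q, &pkt);
+ *
+ *	if (descs) {
+ *		uint8_t i;
+ *
+ *		for (i = 0; i < pkt.num_of_bufs; i++)
+ *			consume(pkt.bufs[i].len);
+ *	}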
+ */
+uint32_t al_eth_pkt_rx(struct al_udma_q *rx_dma_q, struct al_eth_pkt *pkt);
+
+
+/* RX parser table */
+struct al_eth_epe_p_reg_entry {
+	uint32_t data;
+	uint32_t mask;
+	uint32_t ctrl;
+};
+
+struct al_eth_epe_control_entry {
+	uint32_t data[6];
+};
+
+/**
+ * update rx parser entry
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the protocol index to update
+ * @param reg_entry contents of parser register entry
+ * @param control_entry contents of control table entry
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_rx_parser_entry_update(struct al_hal_eth_adapter *adapter, uint32_t idx,
+				  struct al_eth_epe_p_reg_entry *reg_entry,
+				  struct al_eth_epe_control_entry *control_entry);
+
+/* Flow Steering and filtering */
+int al_eth_thash_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t udma, uint32_t queue);
+
+/* FSM table bits */
+/** FSM table has 7 bits input address:
+ * bits[2:0] are the outer packet's type (IPv4, TCP...)
+ * bits[5:3] are the inner packet's type
+ * bit[6] is set when packet is tunneled.
+ *
+ * The output of each entry:
+ * bits[1:0] - input selection: selects the input for the thash (2/4 tuple, inner/outer)
+ * bit[2] - selects whether to use thash output, or default values for the queue and udma
+ * bits[6:3] default UDMA mask: the UDMAs to select when bit 2 above was unset
+ * bits[9:5] default queue: the queue index to select when bit 2 above was unset
+ */
+
+#define AL_ETH_FSM_ENTRY_IPV4_TCP		0
+#define AL_ETH_FSM_ENTRY_IPV4_UDP		1
+#define AL_ETH_FSM_ENTRY_IPV6_TCP		2
+#define AL_ETH_FSM_ENTRY_IPV6_UDP		3
+#define AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP	4
+#define AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP	5
+#define AL_ETH_FSM_ENTRY_IPV4_FRAGMENTED	6
+#define AL_ETH_FSM_ENTRY_NOT_IP			7
+
+#define AL_ETH_FSM_ENTRY_OUTER(idx)	((idx) & 7)
+#define AL_ETH_FSM_ENTRY_INNER(idx)	(((idx) >> 3) & 7)
+#define AL_ETH_FSM_ENTRY_TUNNELED(idx)	(((idx) >> 6) & 1)
+
+/* FSM DATA format */
+#define AL_ETH_FSM_DATA_OUTER_2_TUPLE	0
+#define AL_ETH_FSM_DATA_OUTER_4_TUPLE	1
+#define AL_ETH_FSM_DATA_INNER_2_TUPLE	2
+#define AL_ETH_FSM_DATA_INNER_4_TUPLE	3
+
+#define AL_ETH_FSM_DATA_HASH_SEL	(1 << 2)
+
+#define AL_ETH_FSM_DATA_DEFAULT_Q_SHIFT		5
+#define AL_ETH_FSM_DATA_DEFAULT_UDMA_SHIFT	3
+
+/* set fsm table entry */
+int al_eth_fsm_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint32_t entry);
+
+enum AL_ETH_FWD_CTRL_IDX_VLAN_TABLE_OUT {
+	AL_ETH_FWD_CTRL_IDX_VLAN_TABLE_OUT_0 = 0,
+	AL_ETH_FWD_CTRL_IDX_VLAN_TABLE_OUT_1 = 1,
+	AL_ETH_FWD_CTRL_IDX_VLAN_TABLE_OUT_ANY = 2,
+};
+
+enum AL_ETH_FWD_CTRL_IDX_TUNNEL {
+	AL_ETH_FWD_CTRL_IDX_TUNNEL_NOT_EXIST = 0,
+	AL_ETH_FWD_CTRL_IDX_TUNNEL_EXIST = 1,
+	AL_ETH_FWD_CTRL_IDX_TUNNEL_ANY = 2,
+};
+
+enum AL_ETH_FWD_CTRL_IDX_VLAN {
+	AL_ETH_FWD_CTRL_IDX_VLAN_NOT_EXIST = 0,
+	AL_ETH_FWD_CTRL_IDX_VLAN_EXIST = 1,
+	AL_ETH_FWD_CTRL_IDX_VLAN_ANY = 2,
+};
+
+enum AL_ETH_FWD_CTRL_IDX_MAC_TABLE {
+	AL_ETH_FWD_CTRL_IDX_MAC_TABLE_NO_MATCH = 0,
+	AL_ETH_FWD_CTRL_IDX_MAC_TABLE_MATCH = 1,
+	AL_ETH_FWD_CTRL_IDX_MAC_TABLE_ANY = 2,
+};
+
+enum AL_ETH_FWD_CTRL_IDX_MAC_DA_TYPE {
+	AL_ETH_FWD_CTRL_IDX_MAC_DA_TYPE_UC = 0,	 /**< unicast */
+	AL_ETH_FWD_CTRL_IDX_MAC_DA_TYPE_MC = 1,	 /**< multicast */
+	AL_ETH_FWD_CTRL_IDX_MAC_DA_TYPE_BC = 2,	 /**< broadcast */
+	AL_ETH_FWD_CTRL_IDX_MAC_DA_TYPE_ANY = 4, /**< for sw usage */
+};
+
+/**
+ * This structure defines the index or group of indices within the control table.
+ * each field has special enum value (with _ANY postfix) that indicates all + * possible values of that field. + */ +struct al_eth_fwd_ctrl_table_index { + enum AL_ETH_FWD_CTRL_IDX_VLAN_TABLE_OUT vlan_table_out; + enum AL_ETH_FWD_CTRL_IDX_TUNNEL tunnel_exist; + enum AL_ETH_FWD_CTRL_IDX_VLAN vlan_exist; + enum AL_ETH_FWD_CTRL_IDX_MAC_TABLE mac_table_match; + enum AL_ETH_PROTO_ID protocol_id; + enum AL_ETH_FWD_CTRL_IDX_MAC_DA_TYPE mac_type; +}; + +enum AL_ETH_CTRL_TABLE_PRIO_SEL { + AL_ETH_CTRL_TABLE_PRIO_SEL_PBITS_TABLE = 0, + AL_ETH_CTRL_TABLE_PRIO_SEL_DSCP_TABLE = 1, + AL_ETH_CTRL_TABLE_PRIO_SEL_TC_TABLE = 2, + AL_ETH_CTRL_TABLE_PRIO_SEL_REG1 = 3, + AL_ETH_CTRL_TABLE_PRIO_SEL_REG2 = 4, + AL_ETH_CTRL_TABLE_PRIO_SEL_REG3 = 5, + AL_ETH_CTRL_TABLE_PRIO_SEL_REG4 = 6, + AL_ETH_CTRL_TABLE_PRIO_SEL_REG5 = 7, + AL_ETH_CTRL_TABLE_PRIO_SEL_REG6 = 7, + AL_ETH_CTRL_TABLE_PRIO_SEL_REG7 = 9, + AL_ETH_CTRL_TABLE_PRIO_SEL_REG8 = 10, + AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_3 = 11, + AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0 = 12, +}; +/** where to select the initial queue from */ +enum AL_ETH_CTRL_TABLE_QUEUE_SEL_1 { + AL_ETH_CTRL_TABLE_QUEUE_SEL_1_PRIO_TABLE = 0, + AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE = 1, + AL_ETH_CTRL_TABLE_QUEUE_SEL_1_MAC_TABLE = 2, + AL_ETH_CTRL_TABLE_QUEUE_SEL_1_MHASH_TABLE = 3, + AL_ETH_CTRL_TABLE_QUEUE_SEL_1_REG1 = 4, + AL_ETH_CTRL_TABLE_QUEUE_SEL_1_REG2 = 5, + AL_ETH_CTRL_TABLE_QUEUE_SEL_1_REG3 = 6, + AL_ETH_CTRL_TABLE_QUEUE_SEL_1_REG4 = 7, + AL_ETH_CTRL_TABLE_QUEUE_SEL_1_VAL_3 = 12, + AL_ETH_CTRL_TABLE_QUEUE_SEL_1_VAL_0 = 13, +}; + +/** target queue will be built up from the priority and initial queue */ +enum AL_ETH_CTRL_TABLE_QUEUE_SEL_2 { + AL_ETH_CTRL_TABLE_QUEUE_SEL_2_PRIO_TABLE = 0, /**< target queue is the output of priority table */ + AL_ETH_CTRL_TABLE_QUEUE_SEL_2_PRIO = 1, /**< target queue is the priority */ + AL_ETH_CTRL_TABLE_QUEUE_SEL_2_PRIO_QUEUE = 2, /**< target queue is initial queue[0], priority[1] */ + AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO = 3, /**< target queue is the initial */ +}; + +enum AL_ETH_CTRL_TABLE_UDMA_SEL { + AL_ETH_CTRL_TABLE_UDMA_SEL_THASH_TABLE = 0, + AL_ETH_CTRL_TABLE_UDMA_SEL_THASH_AND_VLAN = 1, + AL_ETH_CTRL_TABLE_UDMA_SEL_VLAN_TABLE = 2, + AL_ETH_CTRL_TABLE_UDMA_SEL_VLAN_AND_MAC = 3, + AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE = 4, + AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_AND_MHASH = 5, + AL_ETH_CTRL_TABLE_UDMA_SEL_MHASH_TABLE = 6, + AL_ETH_CTRL_TABLE_UDMA_SEL_REG1 = 7, + AL_ETH_CTRL_TABLE_UDMA_SEL_REG2 = 8, + AL_ETH_CTRL_TABLE_UDMA_SEL_REG3 = 9, + AL_ETH_CTRL_TABLE_UDMA_SEL_REG4 = 10, + AL_ETH_CTRL_TABLE_UDMA_SEL_REG5 = 11, + AL_ETH_CTRL_TABLE_UDMA_SEL_REG6 = 12, + AL_ETH_CTRL_TABLE_UDMA_SEL_REG7 = 13, + AL_ETH_CTRL_TABLE_UDMA_SEL_REG8 = 14, + AL_ETH_CTRL_TABLE_UDMA_SEL_VAL_0 = 15, +}; + +enum AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL { + AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_0 = 0, + AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_REG = 1, /**< select header len from the hdr_split register (set by al_eth_rx_header_split_config())*/ + AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_OUTER_L3_OFFSET = 2, + AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_OUTER_L4_OFFSET = 3, + AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_TUNNEL_START_OFFSET = 4, + AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_INNER_L3_OFFSET = 5, + AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_INNER_L4_OFFSET = 6, +}; + +struct al_eth_fwd_ctrl_table_entry { + enum AL_ETH_CTRL_TABLE_PRIO_SEL prio_sel; + enum AL_ETH_CTRL_TABLE_QUEUE_SEL_1 queue_sel_1; /**< queue id source */ + enum AL_ETH_CTRL_TABLE_QUEUE_SEL_2 queue_sel_2; /**< mix queue id with priority */ + enum 
AL_ETH_CTRL_TABLE_UDMA_SEL udma_sel; + enum AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL hdr_split_len_sel; + al_bool filter; /**< set to AL_TRUE to enable filtering */ +}; +/** + * Configure default control table entry + * + * @param adapter pointer to the private structure + * @param use_table set to AL_TRUE if control table is used, when set to AL_FALSE + * then control table will be bypassed and the entry value will be used. + * @param entry defines the value to be used when bypassing control table. + * + * @return 0 on success. otherwise on failure. + */ +int al_eth_ctrl_table_def_set(struct al_hal_eth_adapter *adapter, + al_bool use_table, + struct al_eth_fwd_ctrl_table_entry *entry); + +/** + * Configure control table entry + * + * @param adapter pointer to the private structure + * @param index the entry index within the control table. + * @param entry the value to write to the control table entry + * + * @return 0 on success. otherwise on failure. + */ +int al_eth_ctrl_table_set(struct al_hal_eth_adapter *adapter, + struct al_eth_fwd_ctrl_table_index *index, + struct al_eth_fwd_ctrl_table_entry *entry); + +int al_eth_ctrl_table_raw_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint32_t entry); +int al_eth_ctrl_table_def_raw_set(struct al_hal_eth_adapter *adapter, uint32_t val); + +/** + * Configure hash key initial registers + * Those registers define the initial key values, those values used for + * the THASH and MHASH hash functions. + * + * @param adapter pointer to the private structure + * @param idx the register index + * @param val the register value + * + * @return 0 on success. otherwise on failure. + */ +int al_eth_hash_key_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint32_t val); + +struct al_eth_fwd_mac_table_entry { + uint8_t addr[6]; /**< byte 0 is the first byte seen on the wire */ + uint8_t mask[6]; + al_bool tx_valid; + uint8_t tx_target; + al_bool rx_valid; + uint8_t udma_mask; /**< target udma */ + uint8_t qid; /**< target queue */ + al_bool filter; /**< set to AL_TRUE to enable filtering */ +}; + +/** + * Configure mac table entry + * The HW traverse this table and looks for match from lowest index, + * when the packets MAC DA & mask == addr, and the valid bit is set, then match occurs. + * + * @param adapter pointer to the private structure + * @param idx the entry index within the mac table. + * @param entry the contents of the MAC table entry + * + * @return 0 on success. otherwise on failure. + */ +int al_eth_fwd_mac_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, + struct al_eth_fwd_mac_table_entry *entry); + +int al_eth_fwd_mac_addr_raw_set(struct al_hal_eth_adapter *adapter, uint32_t idx, + uint32_t addr_lo, uint32_t addr_hi, uint32_t mask_lo, uint32_t mask_hi); +int al_eth_fwd_mac_ctrl_raw_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint32_t ctrl); + +int al_eth_mac_addr_store(void * __iomem ec_base, uint32_t idx, uint8_t *addr); +int al_eth_mac_addr_read(void * __iomem ec_base, uint32_t idx, uint8_t *addr); + +/** + * Configure pbits table entry + * The HW uses this table to translate between vlan pbits field to priority. + * The vlan pbits is used as the index of this table. + * + * @param adapter pointer to the private structure + * @param idx the entry index within the table. + * @param prio the priority to set for this entry + * + * @return 0 on success. otherwise on failure. 
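+ *
+ * Example: an identity PBITs-to-priority mapping (a common baseline):
+ *
+ *	uint32_t i;
+ *
+ *	for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++)
+ *		al_eth_fwd_pbits_table_set(adapter, i, i);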
+ */
+int al_eth_fwd_pbits_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t prio);
+
+/**
+ * Configure priority table entry
+ * The HW uses this table to translate a priority to a queue index.
+ * The priority is used as the index of this table.
+ *
+ * @param adapter pointer to the private structure
+ * @param prio the entry index within the table.
+ * @param qid the queue index to set for this entry (priority).
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_fwd_priority_table_set(struct al_hal_eth_adapter *adapter, uint8_t prio, uint8_t qid);
+
+/**
+ * Configure DSCP table entry
+ * The HW uses this table to translate the IPv4 DSCP field to a priority.
+ * The IPv4 byte 1 (DSCP+ECN) is used as the index to this table.
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the entry index within the table.
+ * @param prio the priority to set for this entry.
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_fwd_dscp_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t prio);
+
+/**
+ * Configure TC table entry
+ * The HW uses this table to translate the IPv6 TC field to a priority.
+ * The IPv6 TC is used as the index to this table.
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the entry index within the table.
+ * @param prio the priority to set for this entry.
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_fwd_tc_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t prio);
+
+/**
+ * Configure MAC HASH table entry
+ * The HW uses 8 bits from the hash result on the MAC DA as index to this table.
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the entry index within the table.
+ * @param udma_mask the target udma to set for this entry.
+ * @param qid the target queue index to set for this entry.
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_fwd_mhash_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t udma_mask, uint8_t qid);
+
+struct al_eth_fwd_vid_table_entry {
+	uint8_t	control:1; /**< used as input for the control table */
+	uint8_t filter:1; /**< set to 1 to enable filtering */
+	uint8_t udma_mask:4; /**< target udmas */
+};
+
+/**
+ * Configure default vlan table entry
+ *
+ * @param adapter pointer to the private structure
+ * @param use_table set to AL_TRUE if vlan table is used, when set to AL_FALSE
+ * then vid table will be bypassed and the default_entry value will be used.
+ * @param default_entry defines the value to be used when bypassing vid table.
+ * @param default_vlan defines the value to be used when an untagged packet is
+ * received. this value will be used only for steering and filtering control,
+ * the packet's data will not be changed.
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_fwd_vid_config_set(struct al_hal_eth_adapter *adapter, al_bool use_table,
+			      struct al_eth_fwd_vid_table_entry *default_entry,
+			      uint32_t default_vlan);
+/**
+ * Configure vlan table entry
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the entry index within the vlan table. The HW uses the vlan id
+ * field of the packet when accessing this table.
+ * @param entry the value to write to the vlan table entry
+ *
+ * @return 0 on success. otherwise on failure.
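+ *
+ * Example sketch: steer VLAN 100 to UDMA 0 without filtering (the field
+ * values are illustrative):
+ *
+ *	struct al_eth_fwd_vid_table_entry ent = {
+ *		.control = 1, .filter = 0, .udma_mask = 0x1 };
+ *
+ *	al_eth_fwd_vid_table_set(adapter, 100, &ent);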
+ */ +int al_eth_fwd_vid_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, + struct al_eth_fwd_vid_table_entry *entry); + + +/** + * Configure default UDMA register + * When the control table entry udma selection set to AL_ETH_CTRL_TABLE_UDMA_SEL_REG, + * then the target UDMA will be set according to the register n of the default + * UDMA registers. + * + * @param adapter pointer to the private structure + * @param idx the index of the default register. + * @param udma_mask the value of the register. + * + * @return 0 on success. otherwise on failure. + */ +int al_eth_fwd_default_udma_config(struct al_hal_eth_adapter *adapter, uint32_t idx, + uint8_t udma_mask); + +/** + * Configure default queue register + * When the control table entry queue selection 1 set to AL_ETH_CTRL_TABLE_QUEUE_SEL_1_REG, + * then the target queue will be set according to the register n of the default + * queue registers. + * + * @param adapter pointer to the private structure + * @param idx the index of the default register. + * @param qid the value of the register. + * + * @return 0 on success. otherwise on failure. + */ +int al_eth_fwd_default_queue_config(struct al_hal_eth_adapter *adapter, uint32_t idx, + uint8_t qid); + +/** + * Configure default priority register + * When the control table entry queue selection 1 set to AL_ETH_CTRL_TABLE_PRIO_SEL_1_REG, + * then the target priority will be set according to the register n of the default + * priority registers. + * + * @param adapter pointer to the private structure + * @param idx the index of the default register. + * @param prio the value of the register. + * + * @return 0 on success. otherwise on failure. + */ +int al_eth_fwd_default_priority_config(struct al_hal_eth_adapter *adapter, uint32_t idx, + uint8_t prio); + + + +/* filter undetected MAC DA */ +#define AL_ETH_RFW_FILTER_UNDET_MAC (1 << 0) +/* filter specific MAC DA based on MAC table output */ +#define AL_ETH_RFW_FILTER_DET_MAC (1 << 1) +/* filter all tagged */ +#define AL_ETH_RFW_FILTER_TAGGED (1 << 2) +/* filter all untagged */ +#define AL_ETH_RFW_FILTER_UNTAGGED (1 << 3) +/* filter all broadcast */ +#define AL_ETH_RFW_FILTER_BC (1 << 4) +/* filter all multicast */ +#define AL_ETH_RFW_FILTER_MC (1 << 5) +/* filter packet based on parser drop */ +#define AL_ETH_RFW_FILTER_PARSE (1 << 6) +/* filter packet based on VLAN table output */ +#define AL_ETH_RFW_FILTER_VLAN_VID (1 << 7) +/* filter packet based on control table output */ +#define AL_ETH_RFW_FILTER_CTRL_TABLE (1 << 8) +/* filter packet based on protocol index */ +#define AL_ETH_RFW_FILTER_PROT_INDEX (1 << 9) +/* filter packet based on WoL decision */ +#define AL_ETH_RFW_FILTER_WOL (1 << 10) + + +struct al_eth_filter_params { + al_bool enable; + uint32_t filters; /**< bitmask of AL_ETH_RFW_FILTER.. for filters to enable */ + al_bool filter_proto[AL_ETH_PROTOCOLS_NUM]; /**< set AL_TRUE for protocols to filter */ +}; + +struct al_eth_filter_override_params { + uint32_t filters; /**< bitmask of AL_ETH_RFW_FILTER.. for filters to override */ + uint8_t udma; /**< target udma id */ + uint8_t qid; /**< target queue id */ +}; + +/** + * Configure the receive filters + * this function enables/disables filtering packets and which filtering + * types to apply. + * filters that indicated in tables (MAC table, VLAN and Control tables) + * are not configured by this function. This functions only enables/disables + * respecting the filter indication from those tables. 
+ *
+ * @param adapter pointer to the private structure
+ * @param params the parameters passed from upper layer
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_filter_config(struct al_hal_eth_adapter *adapter, struct al_eth_filter_params *params);
+
+/**
+ * Configure the receive override filters
+ * This function controls whether to force forwarding of filtered packets
+ * to a specific UDMA/queue. The override filters apply only for
+ * filters that are enabled by al_eth_filter_config().
+ *
+ * @param adapter pointer to the private structure
+ * @param params override config parameters
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_filter_override_config(struct al_hal_eth_adapter *adapter,
+				  struct al_eth_filter_override_params *params);
+
+
+int al_eth_switching_config_set(struct al_hal_eth_adapter *adapter, uint8_t udma_id, uint8_t forward_all_to_mac, uint8_t enable_int_switching,
+				enum al_eth_tx_switch_vid_sel_type vid_sel_type,
+				enum al_eth_tx_switch_dec_type uc_dec,
+				enum al_eth_tx_switch_dec_type mc_dec,
+				enum al_eth_tx_switch_dec_type bc_dec);
+int al_eth_switching_default_bitmap_set(struct al_hal_eth_adapter *adapter, uint8_t udma_id, uint8_t udma_uc_bitmask,
+					uint8_t udma_mc_bitmask, uint8_t udma_bc_bitmask);
+int al_eth_flow_control_config(struct al_hal_eth_adapter *adapter, struct al_eth_flow_control_params *params);
+
+struct al_eth_eee_params{
+	uint8_t enable;
+	uint32_t tx_eee_timer; /**< time in cycles the interface delays prior to entering eee state */
+	uint32_t min_interval; /**< minimum interval in cycles between two eee states */
+	uint32_t stop_cnt; /**< time in cycles to stop Tx mac i/f after getting out of eee state */
+};
+
+/**
+ * configure EEE mode
+ * @param adapter pointer to the private structure.
+ * @param params pointer to the eee input parameters.
+ *
+ * @return return 0 on success. otherwise on failure.
+ */
+int al_eth_eee_config(struct al_hal_eth_adapter *adapter, struct al_eth_eee_params *params);
+
+/**
+ * get EEE configuration
+ * @param adapter pointer to the private structure.
+ * @param params pointer to the eee output parameters.
+ *
+ * @return return 0 on success. otherwise on failure.
+ */
+int al_eth_eee_get(struct al_hal_eth_adapter *adapter, struct al_eth_eee_params *params);
+
+int al_eth_vlan_mod_config(struct al_hal_eth_adapter *adapter, uint8_t udma_id, uint16_t udma_etype, uint16_t vlan1_data, uint16_t vlan2_data);
+
+/* Timestamp
+ * This is a generic timestamp mechanism that can be used to timestamp every
+ * received or transmitted packet; it can also support the IEEE 1588v2 PTP
+ * time synchronization protocol.
+ * In addition to timestamping, an internal system time is maintained. For
+ * further accuracy, the chip supports transmit/receive clock synchronization,
+ * including recovery of the master clock from one of the ports and
+ * distributing it to the rest of the ports - that is outside the scope of the
+ * Ethernet Controller - please refer to the Annapurna Labs Alpine Hardware
+ * Wiki
+ */
+
+/* Timestamp management APIs */
+
+/**
+ * prepare the adapter for timestamping packets.
+ * Rx timestamping requires using an 8-word (8x4 bytes) rx completion
+ * descriptor size, as the timestamp value is added into word 4.
+ *
+ * This function should be called after al_eth_mac_config() and before
+ * enabling the queues.
+ * @param adapter pointer to the private structure.
+ * @return 0 on success. otherwise on failure.
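+ *
+ * Call-order sketch (al_eth_mac_config() is the MAC setup mentioned above;
+ * its exact signature is not shown in this header excerpt):
+ *
+ *	al_eth_mac_config(adapter, mac_mode);
+ *	al_eth_ts_init(adapter);
+ *	(then enable the UDMA queues)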
+ */ +int al_eth_ts_init(struct al_hal_eth_adapter *adapter); + +/* Timestamp data path APIs */ + +/* + * This is the size of the on-chip array that keeps the time-stamp of the + * latest transmitted packets + */ +#define AL_ETH_PTH_TX_SAMPLES_NUM 16 + +/** + * read Timestamp sample value of previously transmitted packet. + * + * The adapter includes AL_ETH_PTH_TX_SAMPLES_NUM timestamp samples for tx + * packets, those samples shared for all the UDMAs and queues. the al_eth_pkt + * data structure includes the index of which sample to use for the packet + * to transmit. It's the caller's responsibility to manage those samples, + * for example, when using an index, the caller must make sure the packet + * is completed and the tx time is sampled before using that index for + * another packet. + * + * This function should be called after the completion indication of the + * tx packet. however, there is a little chance that the timestamp sample + * won't be updated yet, thus this function must be called again when it + * returns -EAGAIN. + * @param adapter pointer to the private structure. + * @param ts_index the index (out of 16) of the timestamp register + * @param timestamp the timestamp value in 2^18 femtoseconds resolution. + * @return -EAGAIN if the sample was not updated yet. 0 when the sample + * was updated and no errors found. + */ +int al_eth_tx_ts_val_get(struct al_hal_eth_adapter *adapter, uint8_t ts_index, + uint32_t *timestamp); + +/* Timestamp PTH (PTP Timestamp Handler) control and times management */ +/** structure for describing PTH epoch time */ +struct al_eth_pth_time { + uint32_t seconds; /**< seconds */ + uint64_t femto; /**< femto seconds */ +}; + +/** + * Read the systime value + * This API should not be used to get the timestamp of packets. + * The HW maintains 50 bits for the sub-seconds portion in femto resolution, + * but this function reads only the 32 MSB bits since the LSB provides + * sub-nanoseconds accuracy, which is not needed. + * @param adapter pointer to the private structure. + * @param systime pointer to structure where the time will be stored. + * @return 0 on success. otherwise on failure. + */ +int al_eth_pth_systime_read(struct al_hal_eth_adapter *adapter, + struct al_eth_pth_time *systime); + +/** + * Set the clock period to a given value. + * The systime will be incremented by this value on each posedge of the + * adapters internal clock which driven by the SouthBridge clock. + * @param adapter pointer to the private structure. + * @param clk_period the clock period in femto seconds. + * @return 0 on success. otherwise on failure. 
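+ *
+ * The period is 10^15 / f femtoseconds for a clock of f Hz; e.g. for an
+ * assumed 500 MHz SouthBridge clock:
+ *
+ *	al_eth_pth_clk_period_write(adapter, 2000000);
+ *
+ * (2 ns = 2,000,000 femtoseconds)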
+ */ +int al_eth_pth_clk_period_write(struct al_hal_eth_adapter *adapter, + uint64_t clk_period); + +/**< enum for methods when updating systime using triggers */ +enum al_eth_pth_update_method { + AL_ETH_PTH_UPDATE_METHOD_SET = 0, /**< Set the time in int/ext update time */ + AL_ETH_PTH_UPDATE_METHOD_INC = 1, /**< increment */ + AL_ETH_PTH_UPDATE_METHOD_DEC = 2, /**< decrement */ + AL_ETH_PTH_UPDATE_METHOD_ADD_TO_LAST = 3, /**< Set to last time + int/ext update time.*/ +}; + +/**< systime internal update trigger types */ +enum al_eth_pth_int_trig { + AL_ETH_PTH_INT_TRIG_OUT_PULSE_0 = 0, /**< use output pulse as trigger */ + AL_ETH_PTH_INT_TRIG_REG_WRITE = 1, /**< use the int update register + * write as a trigger + */ +}; + +/**< parameters for internal trigger update */ +struct al_eth_pth_int_update_params { + al_bool enable; /**< enable internal trigger update */ + enum al_eth_pth_update_method method; /**< internal trigger update + * method + */ + enum al_eth_pth_int_trig trigger; /**< which internal trigger to + * use + */ +}; + +/** + * Configure the systime internal update + * + * @param adapter pointer to the private structure. + * @param params the configuration of the internal update. + * @return 0 on success. otherwise on failure. + */ +int al_eth_pth_int_update_config(struct al_hal_eth_adapter *adapter, + struct al_eth_pth_int_update_params *params); + +/** + * set internal update time + * + * The update time used when updating the systime with + * internal update method. + * + * @param adapter pointer to the private structure. + * @param time the internal update time value + * @return 0 on success. otherwise on failure. + */ +int al_eth_pth_int_update_time_set(struct al_hal_eth_adapter *adapter, + struct al_eth_pth_time *time); + +/**< parameters for external trigger update */ +struct al_eth_pth_ext_update_params { + uint8_t triggers; /**< bitmask of external triggers to enable */ + enum al_eth_pth_update_method method; /**< external trigger update + * method + */ +}; + +/** + * Configure the systime external update. + * external update triggered by external signals such as GPIO or pulses + * from other eth controllers on the SoC. + * + * @param adapter pointer to the private structure. + * @param params the configuration of the external update. + * @return 0 on success. otherwise on failure. + */ +int al_eth_pth_ext_update_config(struct al_hal_eth_adapter *adapter, + struct al_eth_pth_ext_update_params *params); + +/** + * set external update time + * + * The update time used when updating the systime with + * external update method. + * @param adapter pointer to the private structure. + * @param time the external update time value + * @return 0 on success. otherwise on failure. + */ +int al_eth_pth_ext_update_time_set(struct al_hal_eth_adapter *adapter, + struct al_eth_pth_time *time); +/** + * set the read compensation delay + * + * When reading the systime, the HW adds this value to compensate + * read latency. + * + * @param adapter pointer to the private structure. + * @param subseconds the read latency delay in femto seconds. + * @return 0 on success. otherwise on failure. + */ +int al_eth_pth_read_compensation_set(struct al_hal_eth_adapter *adapter, + uint64_t subseconds); +/** + * set the internal write compensation delay + * + * When updating the systime due to an internal trigger's event, the HW adds + * this value to compensate latency. + * + * @param adapter pointer to the private structure. + * @param subseconds the write latency delay in femto seconds. 
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_int_write_compensation_set(struct al_hal_eth_adapter *adapter,
+					  uint64_t subseconds);
+
+/**
+ * set the external write compensation delay
+ *
+ * When updating the systime due to an external trigger's event, the HW adds
+ * this value to compensate pulse propagation latency.
+ *
+ * @param adapter pointer to the private structure.
+ * @param subseconds the write latency delay in femto seconds.
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_ext_write_compensation_set(struct al_hal_eth_adapter *adapter,
+					  uint64_t subseconds);
+
+/**
+ * set the sync compensation delay
+ *
+ * When the adapter passes systime from PTH to MAC to do the packets
+ * timestamping, the sync compensation delay is added to systime value to
+ * compensate the latency between the PTH and the MAC.
+ *
+ * @param adapter pointer to the private structure.
+ * @param subseconds the sync latency delay in femto seconds.
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_sync_compensation_set(struct al_hal_eth_adapter *adapter,
+				     uint64_t subseconds);
+
+#define AL_ETH_PTH_PULSE_OUT_NUM	8
+struct al_eth_pth_pulse_out_params {
+	uint8_t index; /**< id of the pulse (0..7) */
+	al_bool enable;
+	al_bool periodic; /**< when true, generate periodic pulse (PPS) */
+	uint8_t period_sec; /**< for periodic pulse, this is seconds
+			     * portion of the period time
+			     */
+	uint32_t period_us; /**< this is microseconds portion of the
+			     * period
+			     */
+	struct al_eth_pth_time start_time; /**< when to start pulse triggering */
+	uint64_t pulse_width; /**< pulse width in femto seconds */
+};
+
+/**
+ * Configure an output pulse
+ * This function configures an output pulse coming from the internal System
+ * Time. This is typically a 1 Hz pulse that is used to synchronize the
+ * rest of the components of the system. This API configures the Ethernet
+ * Controller pulse. Additional setup is required to configure the chip
+ * General Purpose I/O (GPIO) to enable the chip output pin.
+ *
+ * @param adapter pointer to the private structure.
+ * @param params output pulse configuration.
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_pulse_out_config(struct al_hal_eth_adapter *adapter,
+				struct al_eth_pth_pulse_out_params *params);
+
+/* link */
+struct al_eth_link_status {
+	al_bool		link_up;
+};
+
+/**
+ * get link status
+ *
+ * this function should be used when no external phy is used to get
+ * information about the link
+ *
+ * @param adapter pointer to the private structure.
+ * @param status pointer to struct where to set link information
+ *
+ * @return return 0 on success. otherwise on failure.
+ */
+int al_eth_link_status_get(struct al_hal_eth_adapter *adapter, struct al_eth_link_status *status);
+
+/**
+ * Set LEDs to represent link status.
+ *
+ * @param adapter pointer to the private structure.
+ * @param link_is_up boolean indicating current link status.
+ * When the link is down the LEDs are turned off.
+ * When the link is up the LEDs are turned on: they blink on traffic
+ * and light constantly when the link is idle.
+ * @return return 0 on success. otherwise on failure.
+ */
+int al_eth_led_set(struct al_hal_eth_adapter *adapter, al_bool link_is_up);
+
+/* get statistics */
+
+struct al_eth_mac_stats{
+	/* sum the data and padding octets (i.e. without header and FCS) received with a valid frame.
*/ + uint64_t aOctetsReceivedOK; + /* sum of Payload and padding octets of frames transmitted without error*/ + uint64_t aOctetsTransmittedOK; + /* total number of packets received. Good and bad packets */ + uint32_t etherStatsPkts; + /* number of received unicast packets */ + uint32_t ifInUcastPkts; + /* number of received multicast packets */ + uint32_t ifInMulticastPkts; + /* number of received broadcast packets */ + uint32_t ifInBroadcastPkts; + /* Number of frames received with FIFO Overflow, CRC, Payload Length, Jabber and Oversized, Alignment or PHY/PCS error indication */ + uint32_t ifInErrors; + + /* number of transmitted unicast packets */ + uint32_t ifOutUcastPkts; + /* number of transmitted multicast packets */ + uint32_t ifOutMulticastPkts; + /* number of transmitted broadcast packets */ + uint32_t ifOutBroadcastPkts; + /* number of frames transmitted with FIFO Overflow, FIFO Underflow or Controller indicated error */ + uint32_t ifOutErrors; + + /* number of Frame received without error (Including Pause Frames). */ + uint32_t aFramesReceivedOK; + /* number of Frames transmitter without error (Including Pause Frames) */ + uint32_t aFramesTransmittedOK; + /* number of packets received with less than 64 octets */ + uint32_t etherStatsUndersizePkts; + /* Too short frames with CRC error, available only for RGMII and 1G Serial modes */ + uint32_t etherStatsFragments; + /* Too long frames with CRC error */ + uint32_t etherStatsJabbers; + /* packet that exceeds the valid maximum programmed frame length */ + uint32_t etherStatsOversizePkts; + /* number of frames received with a CRC error */ + uint32_t aFrameCheckSequenceErrors; + /* number of frames received with alignment error */ + uint32_t aAlignmentErrors; + /* number of dropped packets due to FIFO overflow */ + uint32_t etherStatsDropEvents; + /* number of transmitted pause frames. */ + uint32_t aPAUSEMACCtrlFramesTransmitted; + /* number of received pause frames. */ + uint32_t aPAUSEMACCtrlFramesReceived; + /* frame received exceeded the maximum length programmed with register FRM_LGTH, available only for 10G modes */ + uint32_t aFrameTooLongErrors; + /* received frame with bad length/type (between 46 and 0x600 or less + * than 46 for packets longer than 64), available only for 10G modes */ + uint32_t aInRangeLengthErrors; + /* Valid VLAN tagged frames transmitted */ + uint32_t VLANTransmittedOK; + /* Valid VLAN tagged frames received */ + uint32_t VLANReceivedOK; + /* Total number of octets received. Good and bad packets */ + uint32_t etherStatsOctets; + + /* packets of 64 octets length is received (good and bad frames are counted) */ + uint32_t etherStatsPkts64Octets; + /* Frames (good and bad) with 65 to 127 octets */ + uint32_t etherStatsPkts65to127Octets; + /* Frames (good and bad) with 128 to 255 octets */ + uint32_t etherStatsPkts128to255Octets; + /* Frames (good and bad) with 256 to 511 octets */ + uint32_t etherStatsPkts256to511Octets; + /* Frames (good and bad) with 512 to 1023 octets */ + uint32_t etherStatsPkts512to1023Octets; + /* Frames (good and bad) with 1024 to 1518 octets */ + uint32_t etherStatsPkts1024to1518Octets; + /* frames with 1519 bytes to the maximum length programmed in the register FRAME_LENGTH. */ + uint32_t etherStatsPkts1519toX; + + uint32_t eee_in; + uint32_t eee_out; +}; + +/** + * get mac statistics + * @param adapter pointer to the private structure. + * @param stats pointer to structure that will be filled with statistics. + * + * @return return 0 on success. otherwise on failure. 
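+ *
+ * Example usage (an illustrative sketch, not vendor documentation; assumes
+ * an already-initialized adapter, and ndev is a hypothetical struct
+ * net_device pointer):
+ * @code
+ *	struct al_eth_mac_stats stats;
+ *
+ *	if (al_eth_mac_stats_get(adapter, &stats) == 0) {
+ *		ndev->stats.rx_packets = stats.aFramesReceivedOK;
+ *		ndev->stats.rx_errors = stats.ifInErrors;
+ *	}
+ * @endcode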
+ */
+int al_eth_mac_stats_get(struct al_hal_eth_adapter *adapter, struct al_eth_mac_stats *stats);
+
+struct al_eth_ec_stats{
+	/* Rx Frequency adjust FIFO input packets */
+	uint32_t faf_in_rx_pkt;
+	/* Rx Frequency adjust FIFO input short error packets */
+	uint32_t faf_in_rx_short;
+	/* Rx Frequency adjust FIFO input long error packets */
+	uint32_t faf_in_rx_long;
+	/* Rx Frequency adjust FIFO output packets */
+	uint32_t faf_out_rx_pkt;
+	/* Rx Frequency adjust FIFO output short error packets */
+	uint32_t faf_out_rx_short;
+	/* Rx Frequency adjust FIFO output long error packets */
+	uint32_t faf_out_rx_long;
+	/* Rx Frequency adjust FIFO output drop packets */
+	uint32_t faf_out_drop;
+	/* Number of packets written into the Rx FIFO (without FIFO error indication) */
+	uint32_t rxf_in_rx_pkt;
+	/* Number of error packets written into the Rx FIFO (with FIFO error indication, */
+	/* FIFO full indication during packet reception) */
+	uint32_t rxf_in_fifo_err;
+	/* Number of packets written into the loopback FIFO (without FIFO error indication) */
+	uint32_t lbf_in_rx_pkt;
+	/* Number of error packets written into the loopback FIFO (with FIFO error indication) */
+	uint32_t lbf_in_fifo_err;
+	/* Number of packets read from Rx FIFO 1 */
+	uint32_t rxf_out_rx_1_pkt;
+	/* Number of packets read from Rx FIFO 2 (loopback FIFO) */
+	uint32_t rxf_out_rx_2_pkt;
+	/* Rx FIFO output drop packets from FIFO 1 */
+	uint32_t rxf_out_drop_1_pkt;
+	/* Rx FIFO output drop packets from FIFO 2 (loop back) */
+	uint32_t rxf_out_drop_2_pkt;
+	/* Rx Parser 1, input packet counter */
+	uint32_t rpe_1_in_rx_pkt;
+	/* Rx Parser 1, output packet counter */
+	uint32_t rpe_1_out_rx_pkt;
+	/* Rx Parser 2, input packet counter */
+	uint32_t rpe_2_in_rx_pkt;
+	/* Rx Parser 2, output packet counter */
+	uint32_t rpe_2_out_rx_pkt;
+	/* Rx Parser 3 (MACsec), input packet counter */
+	uint32_t rpe_3_in_rx_pkt;
+	/* Rx Parser 3 (MACsec), output packet counter */
+	uint32_t rpe_3_out_rx_pkt;
+	/* Tx parser, input packet counter */
+	uint32_t tpe_in_tx_pkt;
+	/* Tx parser, output packet counter */
+	uint32_t tpe_out_tx_pkt;
+	/* Tx packet modification, input packet counter */
+	uint32_t tpm_tx_pkt;
+	/* Tx forwarding input packet counter */
+	uint32_t tfw_in_tx_pkt;
+	/* Tx forwarding output packet counter */
+	uint32_t tfw_out_tx_pkt;
+	/* Rx forwarding input packet counter */
+	uint32_t rfw_in_rx_pkt;
+	/* Rx Forwarding, packet with VLAN command drop indication */
+	uint32_t rfw_in_vlan_drop;
+	/* Rx Forwarding, packets with parse drop indication */
+	uint32_t rfw_in_parse_drop;
+	/* Rx Forwarding, multicast packets */
+	uint32_t rfw_in_mc;
+	/* Rx Forwarding, broadcast packets */
+	uint32_t rfw_in_bc;
+	/* Rx Forwarding, tagged packets */
+	uint32_t rfw_in_vlan_exist;
+	/* Rx Forwarding, untagged packets */
+	uint32_t rfw_in_vlan_nexist;
+	/* Rx Forwarding, packets with MAC address drop indication (from the MAC address table) */
+	uint32_t rfw_in_mac_drop;
+	/* Rx Forwarding, packets with undetected MAC address */
+	uint32_t rfw_in_mac_ndet_drop;
+	/* Rx Forwarding, packets with drop indication from the control table */
+	uint32_t rfw_in_ctrl_drop;
+	/* Rx Forwarding, packets with L3_protocol_index drop indication */
+	uint32_t rfw_in_prot_i_drop;
+	/* EEE, number of times the system went into EEE state */
+	uint32_t eee_in;
+};
+
+/**
+ * get ec statistics
+ * @param adapter pointer to the private structure.
+ * @param stats pointer to structure that will be filled with statistics.
+ *
+ * @return 0 on success. otherwise on failure.
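+ *
+ * Example (an illustrative sketch; handle_rx_drops() is a hypothetical
+ * caller-side helper):
+ * @code
+ *	struct al_eth_ec_stats ec_stats;
+ *
+ *	if (al_eth_ec_stats_get(adapter, &ec_stats) == 0 &&
+ *	    ec_stats.faf_out_drop != 0)
+ *		handle_rx_drops(ec_stats.faf_out_drop);
+ * @endcode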
+ */
+int al_eth_ec_stats_get(struct al_hal_eth_adapter *adapter, struct al_eth_ec_stats *stats);
+
+struct al_eth_ec_stat_udma{
+	/* Rx forwarding output packet counter */
+	uint32_t rfw_out_rx_pkt;
+	/* Rx forwarding output drop packet counter */
+	uint32_t rfw_out_drop;
+	/* Multi-stream write, number of Rx packets */
+	uint32_t msw_in_rx_pkt;
+	/* Multi-stream write, number of dropped packets at SOP, Q full indication */
+	uint32_t msw_drop_q_full;
+	/* Multi-stream write, number of dropped packets at SOP */
+	uint32_t msw_drop_sop;
+	/* Multi-stream write, number of dropped packets at EOP, */
+	/* EOP was written with error indication (not all packet data was written) */
+	uint32_t msw_drop_eop;
+	/* Multi-stream write, number of packets written to the stream FIFO with EOP and without packet loss */
+	uint32_t msw_wr_eop;
+	/* Multi-stream write, number of packets read from the FIFO into the stream */
+	uint32_t msw_out_rx_pkt;
+	/* Number of transmitted packets without TSO enabled */
+	uint32_t tso_no_tso_pkt;
+	/* Number of transmitted packets with TSO enabled */
+	uint32_t tso_tso_pkt;
+	/* Number of TSO segments that were generated */
+	uint32_t tso_seg_pkt;
+	/* Number of TSO segments that required padding */
+	uint32_t tso_pad_pkt;
+	/* Tx Packet modification, MAC SA spoof error */
+	uint32_t tpm_tx_spoof;
+	/* Tx MAC interface, input packet counter */
+	uint32_t tmi_in_tx_pkt;
+	/* Tx MAC interface, number of packets forwarded to the MAC */
+	uint32_t tmi_out_to_mac;
+	/* Tx MAC interface, number of packets forwarded to the Rx data path */
+	uint32_t tmi_out_to_rx;
+	/* Tx MAC interface, number of transmitted bytes (queue 0) */
+	uint32_t tx_q0_bytes;
+	/* Tx MAC interface, number of transmitted bytes (queue 1) */
+	uint32_t tx_q1_bytes;
+	/* Tx MAC interface, number of transmitted bytes (queue 2) */
+	uint32_t tx_q2_bytes;
+	/* Tx MAC interface, number of transmitted bytes (queue 3) */
+	uint32_t tx_q3_bytes;
+	/* Tx MAC interface, number of transmitted packets (queue 0) */
+	uint32_t tx_q0_pkts;
+	/* Tx MAC interface, number of transmitted packets (queue 1) */
+	uint32_t tx_q1_pkts;
+	/* Tx MAC interface, number of transmitted packets (queue 2) */
+	uint32_t tx_q2_pkts;
+	/* Tx MAC interface, number of transmitted packets (queue 3) */
+	uint32_t tx_q3_pkts;
+};
+
+/**
+ * get per_udma statistics
+ * @param adapter pointer to the private structure.
+ * @param idx udma_id value
+ * @param stats pointer to structure that will be filled with statistics.
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_ec_stat_udma_get(struct al_hal_eth_adapter *adapter, uint8_t idx, struct al_eth_ec_stat_udma *stats);
+
+/* traffic control */
+
+/**
+ * perform Function Level Reset RMN
+ *
+ * Addressing RMN: 714
+ *
+ * @param pci_read_config_u32 pointer to function that reads register from pci header
+ * @param pci_write_config_u32 pointer to function that writes register from pci header
+ * @param handle pointer passed to the above functions as their first parameter
+ * @param mac_base base address of the MAC registers
+ *
+ * @return 0.
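+ *
+ * Example (an illustrative sketch based on the Linux PCI config-space
+ * accessors; the wrapper names are hypothetical, and pdev/mac_base come
+ * from the caller's probe context):
+ * @code
+ *	static int flr_pci_read(void *handle, int where, uint32_t *val)
+ *	{
+ *		return pci_read_config_dword((struct pci_dev *)handle,
+ *					     where, val);
+ *	}
+ *
+ *	static int flr_pci_write(void *handle, int where, uint32_t val)
+ *	{
+ *		return pci_write_config_dword((struct pci_dev *)handle,
+ *					      where, val);
+ *	}
+ *
+ *	al_eth_flr_rmn(flr_pci_read, flr_pci_write, pdev, mac_base);
+ * @endcode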
+ */
+int al_eth_flr_rmn(int (* pci_read_config_u32)(void *handle, int where, uint32_t *val),
+		   int (* pci_write_config_u32)(void *handle, int where, uint32_t val),
+		   void *handle,
+		   void __iomem *mac_base);
+
+/**
+ * perform Function Level Reset RMN but restore registers that contain board specific data
+ *
+ * the data saved and restored consists of the board params and mac addresses
+ *
+ * @param pci_read_config_u32 pointer to function that reads register from pci header
+ * @param pci_write_config_u32 pointer to function that writes register from pci header
+ * @param handle pointer passed to the above functions as their first parameter
+ * @param mac_base base address of the MAC registers
+ * @param ec_base base address of the Ethernet Controller registers
+ * @param mac_addresses_num number of mac addresses to restore
+ *
+ * @return 0.
+ */
+int al_eth_flr_rmn_restore_params(int (* pci_read_config_u32)(void *handle, int where, uint32_t *val),
+		int (* pci_write_config_u32)(void *handle, int where, uint32_t val),
+		void *handle,
+		void __iomem *mac_base,
+		void __iomem *ec_base,
+		int mac_addresses_num);
+
+/* board specific information (media type, phy address, etc.) */
+
+
+enum al_eth_board_media_type {
+	AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT = 0,
+	AL_ETH_BOARD_MEDIA_TYPE_RGMII = 1,
+	AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR = 2,
+	AL_ETH_BOARD_MEDIA_TYPE_SGMII = 3,
+	AL_ETH_BOARD_MEDIA_TYPE_1000BASE_X = 4,
+	AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED = 5,
+	AL_ETH_BOARD_MEDIA_TYPE_SGMII_2_5G = 6,
+	AL_ETH_BOARD_MEDIA_TYPE_NBASE_T = 7,
+};
+
+enum al_eth_board_mdio_freq {
+	AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ = 0,
+	AL_ETH_BOARD_MDIO_FREQ_1_MHZ = 1,
+};
+
+enum al_eth_board_ext_phy_if {
+	AL_ETH_BOARD_PHY_IF_MDIO = 0,
+	AL_ETH_BOARD_PHY_IF_XMDIO = 1,
+	AL_ETH_BOARD_PHY_IF_I2C = 2,
+
+};
+
+enum al_eth_board_auto_neg_mode {
+	AL_ETH_BOARD_AUTONEG_OUT_OF_BAND = 0,
+	AL_ETH_BOARD_AUTONEG_IN_BAND = 1,
+
+};
+
+/* declares the 1G mac active speed when auto-negotiation is disabled */
+enum al_eth_board_1g_speed {
+	AL_ETH_BOARD_1G_SPEED_1000M = 0,
+	AL_ETH_BOARD_1G_SPEED_100M = 1,
+	AL_ETH_BOARD_1G_SPEED_10M = 2,
+};
+
+enum al_eth_retimer_channel {
+	AL_ETH_RETIMER_CHANNEL_A = 0,
+	AL_ETH_RETIMER_CHANNEL_B = 1,
+};
+
+/** This structure represents the board information. This info is set by the
+ * boot loader and read by the OS driver.
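+ *
+ * Typical flow (an illustrative sketch): the boot loader fills this
+ * structure and stores it with al_eth_board_params_set(); the OS driver
+ * later retrieves it, e.g.:
+ * @code
+ *	struct al_eth_board_params params;
+ *
+ *	if (al_eth_board_params_get(mac_base, &params) == 0 &&
+ *	    params.phy_exist)
+ *		phy_addr = params.phy_mdio_addr;
+ * @endcode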
+ */
+struct al_eth_board_params {
+	enum al_eth_board_media_type media_type;
+	al_bool phy_exist; /**< external phy exists */
+	uint8_t phy_mdio_addr; /**< mdio address of external phy */
+	al_bool sfp_plus_module_exist; /**< SFP+ module connected */
+	al_bool autoneg_enable; /**< enable Auto-Negotiation */
+	al_bool kr_lt_enable; /**< enable KR Link-Training */
+	al_bool kr_fec_enable; /**< enable KR FEC */
+	enum al_eth_board_mdio_freq mdio_freq; /**< MDIO frequency */
+	uint8_t i2c_adapter_id; /**< identifier for the i2c adapter to use to access SFP+ module */
+	enum al_eth_board_ext_phy_if phy_if; /**< phy interface */
+	enum al_eth_board_auto_neg_mode an_mode; /**< auto-negotiation mode (in-band / out-of-band) */
+	uint8_t serdes_grp; /**< serdes group id */
+	uint8_t serdes_lane; /**< serdes lane id */
+	enum al_eth_ref_clk_freq ref_clk_freq; /**< reference clock frequency */
+	al_bool dont_override_serdes; /**< prevent overriding serdes parameters */
+	al_bool force_1000_base_x; /**< set mac to 1000 base-x mode (instead of sgmii) */
+	al_bool an_disable; /**< disable auto negotiation */
+	enum al_eth_board_1g_speed speed; /**< port speed if AN disabled */
+	al_bool half_duplex; /**< force half duplex if AN disabled */
+	al_bool fc_disable; /**< disable flow control */
+	al_bool retimer_exist; /**< a retimer exists on the board */
+	uint8_t retimer_bus_id; /**< which i2c bus the retimer is on */
+	uint8_t retimer_i2c_addr; /**< i2c address of the retimer */
+	enum al_eth_retimer_channel retimer_channel; /**< which channel is connected to this port */
+	al_bool dac; /**< assume a direct attached cable is connected if auto detect is off or has failed */
+	uint8_t dac_len; /**< assume this cable length if auto detect is off or has failed */
+};
+
+/**
+ * set board parameters of the eth port
+ * This function is used to store the board parameters in scratchpad
+ * registers. Those parameters can be read later by the OS driver.
+ *
+ * @param mac_base the virtual address of the mac registers (PCI BAR 2)
+ * @param params pointer to the structure that includes the parameters
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_board_params_set(void * __iomem mac_base, struct al_eth_board_params *params);
+
+/**
+ * get board parameters of the eth port
+ * This function is used to get the board parameters from the scratchpad
+ * registers.
+ *
+ * @param mac_base the virtual address of the mac registers (PCI BAR 2)
+ * @param params pointer to structure where the parameters will be stored.
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_board_params_get(void * __iomem mac_base, struct al_eth_board_params *params);
+
+/*
+ * Wake-On-Lan (WoL)
+ *
+ * The following few functions configure the Wake-On-Lan packet detection
+ * inside the Integrated Ethernet MAC.
+ *
+ * There are other alternative ways to set WoL, such as using the
+ * external 1000Base-T transceiver to set WoL mode.
+ *
+ * These APIs do not set the system-wide power-state, nor are they responsible
+ * for the transition from Sleep to Normal power state.
+ *
+ * For system level considerations, please refer to the Annapurna Labs Alpine Wiki.
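+ *
+ * Example (an illustrative sketch; the chosen masks are arbitrary and
+ * mac_addr is a hypothetical caller-owned 6-byte array):
+ * @code
+ *	struct al_eth_wol_params wol = { 0 };
+ *
+ *	wol.dest_addr = mac_addr;
+ *	wol.int_mask = AL_ETH_WOL_INT_MAGIC | AL_ETH_WOL_INT_UNICAST;
+ *	wol.forward_mask = AL_ETH_WOL_FWRD_UNICAST;
+ *	al_eth_wol_enable(adapter, &wol);
+ * @endcode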
+ */
+/* Interrupt enable WoL MAC DA Unicast detected packet */
+#define AL_ETH_WOL_INT_UNICAST		AL_BIT(0)
+/* Interrupt enable WoL L2 Multicast detected packet */
+#define AL_ETH_WOL_INT_MULTICAST	AL_BIT(1)
+/* Interrupt enable WoL L2 Broadcast detected packet */
+#define AL_ETH_WOL_INT_BROADCAST	AL_BIT(2)
+/* Interrupt enable WoL IPv4 detected packet */
+#define AL_ETH_WOL_INT_IPV4		AL_BIT(3)
+/* Interrupt enable WoL IPv6 detected packet */
+#define AL_ETH_WOL_INT_IPV6		AL_BIT(4)
+/* Interrupt enable WoL EtherType+MAC DA detected packet */
+#define AL_ETH_WOL_INT_ETHERTYPE_DA	AL_BIT(5)
+/* Interrupt enable WoL EtherType+L2 Broadcast detected packet */
+#define AL_ETH_WOL_INT_ETHERTYPE_BC	AL_BIT(6)
+/* Interrupt enable WoL parser detected packet */
+#define AL_ETH_WOL_INT_PARSER		AL_BIT(7)
+/* Interrupt enable WoL magic detected packet */
+#define AL_ETH_WOL_INT_MAGIC		AL_BIT(8)
+/* Interrupt enable WoL magic+password detected packet */
+#define AL_ETH_WOL_INT_MAGIC_PSWD	AL_BIT(9)
+
+/* Forward enable WoL MAC DA Unicast detected packet */
+#define AL_ETH_WOL_FWRD_UNICAST		AL_BIT(0)
+/* Forward enable WoL L2 Multicast detected packet */
+#define AL_ETH_WOL_FWRD_MULTICAST	AL_BIT(1)
+/* Forward enable WoL L2 Broadcast detected packet */
+#define AL_ETH_WOL_FWRD_BROADCAST	AL_BIT(2)
+/* Forward enable WoL IPv4 detected packet */
+#define AL_ETH_WOL_FWRD_IPV4		AL_BIT(3)
+/* Forward enable WoL IPv6 detected packet */
+#define AL_ETH_WOL_FWRD_IPV6		AL_BIT(4)
+/* Forward enable WoL EtherType+MAC DA detected packet */
+#define AL_ETH_WOL_FWRD_ETHERTYPE_DA	AL_BIT(5)
+/* Forward enable WoL EtherType+L2 Broadcast detected packet */
+#define AL_ETH_WOL_FWRD_ETHERTYPE_BC	AL_BIT(6)
+/* Forward enable WoL parser detected packet */
+#define AL_ETH_WOL_FWRD_PARSER		AL_BIT(7)
+
+struct al_eth_wol_params {
+	uint8_t *dest_addr; /**< 6 bytes array of destination address for
+			     * magic packet detection */
+	uint8_t *pswd; /**< 6 bytes array of the password to use */
+	uint8_t *ipv4; /**< 4 bytes array of the ipv4 to use.
+			* example: for ip = 192.168.1.2
+			* ipv4[0]=2, ipv4[1]=1, ipv4[2]=168, ipv4[3]=192 */
+	uint8_t *ipv6; /**< 16 bytes array of the ipv6 to use.
+			* example: ip = 2607:f0d0:1002:0051:0000:0000:5231:1234
+			* ipv6[0]=34, ipv6[1]=12, ipv6[2]=31 .. */
+	uint16_t ethr_type1; /**< first ethertype to use */
+	uint16_t ethr_type2; /**< second ethertype to use */
+	uint16_t forward_mask; /**< bitmask of AL_ETH_WOL_FWRD_* of the packet
+				* types that need to be forwarded. */
+	uint16_t int_mask; /**< bitmask of AL_ETH_WOL_INT_* of the packet types
+			    * that will send an interrupt to wake the system. */
+};
+
+/**
+ * enable the wol mechanism
+ * set what type of packets will wake up the system and what type of packets
+ * need to be forwarded after the system is up
+ *
+ * Besides this function, the wol filter also needs to be set by
+ * calling al_eth_filter_config with AL_ETH_RFW_FILTER_WOL
+ *
+ * @param adapter pointer to the private structure
+ * @param wol the parameters needed to configure the wol
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_wol_enable(
+		struct al_hal_eth_adapter *adapter,
+		struct al_eth_wol_params *wol);
+
+/**
+ * Disable the WoL mechanism.
+ *
+ * @param adapter pointer to the private structure
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_wol_disable(
+		struct al_hal_eth_adapter *adapter);
+
+/**
+ * Configure tx fwd vlan table entry
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the entry index within the vlan table. The HW uses the vlan id
+ *	field of the packet when accessing this table.
+ * @param udma_mask vlan table value that indicates that the packet should be forwarded back to
+ *	the udmas, through the Rx path (udma_mask is one-hot representation)
+ * @param fwd_to_mac vlan table value that indicates that the packet should be forwarded to the mac
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_tx_fwd_vid_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t udma_mask, al_bool fwd_to_mac);
+
+/** Tx Generic protocol detect Cam compare table entry */
+struct al_eth_tx_gpd_cam_entry {
+	enum AL_ETH_PROTO_ID l3_proto_idx;
+	enum AL_ETH_PROTO_ID l4_proto_idx;
+	enum AL_ETH_TX_TUNNEL_MODE tunnel_control;
+	uint8_t source_vlan_count:2;
+	uint8_t tx_gpd_cam_ctrl:1;
+	uint8_t l3_proto_idx_mask:5;
+	uint8_t l4_proto_idx_mask:5;
+	uint8_t tunnel_control_mask:3;
+	uint8_t source_vlan_count_mask:2;
+};
+
+/** Rx Generic protocol detect Cam compare table entry */
+struct al_eth_rx_gpd_cam_entry {
+	enum AL_ETH_PROTO_ID outer_l3_proto_idx;
+	enum AL_ETH_PROTO_ID outer_l4_proto_idx;
+	enum AL_ETH_PROTO_ID inner_l3_proto_idx;
+	enum AL_ETH_PROTO_ID inner_l4_proto_idx;
+	uint8_t parse_ctrl;
+	uint8_t outer_l3_len;
+	uint8_t l3_priority;
+	uint8_t l4_dst_port_lsb;
+	uint8_t rx_gpd_cam_ctrl:1;
+	uint8_t outer_l3_proto_idx_mask:5;
+	uint8_t outer_l4_proto_idx_mask:5;
+	uint8_t inner_l3_proto_idx_mask:5;
+	uint8_t inner_l4_proto_idx_mask:5;
+	uint8_t parse_ctrl_mask;
+	uint8_t outer_l3_len_mask;
+	uint8_t l3_priority_mask;
+	uint8_t l4_dst_port_lsb_mask;
+};
+
+enum AL_ETH_ALU_OPCODE {
+	AL_ALU_FWD_A = 0,
+	AL_ALU_ARITHMETIC_ADD = 1,
+	AL_ALU_ARITHMETIC_SUBTRACT = 2,
+	AL_ALU_BITWISE_AND = 3,
+	AL_ALU_BITWISE_OR = 4,
+	AL_ALU_SHIFT_RIGHT_A_BY_B = 5,
+	AL_ALU_SHIFT_LEFT_A_BY_B = 6,
+	AL_ALU_BITWISE_XOR = 7,
+	AL_ALU_FWD_INV_A = 16,
+	AL_ALU_ARITHMETIC_ADD_INV_A_AND_B = 17,
+	AL_ALU_ARITHMETIC_SUBTRACT_INV_A_AND_B = 18,
+	AL_ALU_BITWISE_AND_INV_A_AND_B = 19,
+	AL_ALU_BITWISE_OR_INV_A_AND_B = 20,
+	AL_ALU_SHIFT_RIGHT_INV_A_BY_B = 21,
+	AL_ALU_SHIFT_LEFT_INV_A_BY_B = 22,
+	AL_ALU_BITWISE_XOR_INV_A_AND_B = 23,
+	AL_ALU_ARITHMETIC_ADD_A_AND_INV_B = 33,
+	AL_ALU_ARITHMETIC_SUBTRACT_A_AND_INV_B = 34,
+	AL_ALU_BITWISE_AND_A_AND_INV_B = 35,
+	AL_ALU_BITWISE_OR_A_AND_INV_B = 36,
+	AL_ALU_SHIFT_RIGHT_A_BY_INV_B = 37,
+	AL_ALU_SHIFT_LEFT_A_BY_INV_B = 38,
+	AL_ALU_BITWISE_XOR_A_AND_INV_B = 39,
+	AL_ALU_ARITHMETIC_ADD_INV_A_AND_INV_B = 49,
+	AL_ALU_ARITHMETIC_SUBTRACT_INV_A_AND = 50,
+	AL_ALU_BITWISE_AND_INV_A_AND_INV_B = 51,
+	AL_ALU_BITWISE_OR_INV_A_AND_INV_B = 52,
+	AL_ALU_SHIFT_RIGHT_INV_A_BY_INV_B = 53,
+	AL_ALU_SHIFT_LEFT_INV_A_BY_INV_B = 54,
+	AL_ALU_BITWISE_XOR_INV_A_AND_INV_B = 55
+};
+
+enum AL_ETH_TX_GCP_ALU_OPSEL {
+	AL_ETH_TX_GCP_ALU_L3_OFFSET = 0,
+	AL_ETH_TX_GCP_ALU_OUTER_L3_OFFSET = 1,
+	AL_ETH_TX_GCP_ALU_L3_LEN = 2,
+	AL_ETH_TX_GCP_ALU_OUTER_L3_LEN = 3,
+	AL_ETH_TX_GCP_ALU_L4_OFFSET = 4,
+	AL_ETH_TX_GCP_ALU_L4_LEN = 5,
+	AL_ETH_TX_GCP_ALU_TABLE_VAL = 10
+};
+
+enum AL_ETH_RX_GCP_ALU_OPSEL {
+	AL_ETH_RX_GCP_ALU_OUTER_L3_OFFSET = 0,
+	AL_ETH_RX_GCP_ALU_INNER_L3_OFFSET = 1,
+	AL_ETH_RX_GCP_ALU_OUTER_L4_OFFSET = 2,
+	AL_ETH_RX_GCP_ALU_INNER_L4_OFFSET = 3,
+	AL_ETH_RX_GCP_ALU_OUTER_L3_HDR_LEN_LAT = 4,
+	AL_ETH_RX_GCP_ALU_INNER_L3_HDR_LEN_LAT = 5,
+	AL_ETH_RX_GCP_ALU_OUTER_L3_HDR_LEN_SEL = 6,
+	AL_ETH_RX_GCP_ALU_INNER_L3_HDR_LEN_SEL = 7,
+	AL_ETH_RX_GCP_ALU_PARSE_RESULT_VECTOR_OFFSET_1 = 8,
+	AL_ETH_RX_GCP_ALU_PARSE_RESULT_VECTOR_OFFSET_2 = 9,
+	AL_ETH_RX_GCP_ALU_TABLE_VAL = 10
+};
+
+/** Tx Generic crc parameters table entry */
+
+struct al_eth_tx_gcp_table_entry {
+	uint8_t poly_sel:1;
+	uint8_t crc32_bit_comp:1;
+	uint8_t crc32_bit_swap:1;
+	uint8_t crc32_byte_swap:1;
+	uint8_t data_bit_swap:1;
+	uint8_t data_byte_swap:1;
+	uint8_t trail_size:4;
+	uint8_t head_size:8;
+	uint8_t head_calc:1;
+	uint8_t mask_polarity:1;
+	enum AL_ETH_ALU_OPCODE tx_alu_opcode_1;
+	enum AL_ETH_ALU_OPCODE tx_alu_opcode_2;
+	enum AL_ETH_ALU_OPCODE tx_alu_opcode_3;
+	enum AL_ETH_TX_GCP_ALU_OPSEL tx_alu_opsel_1;
+	enum AL_ETH_TX_GCP_ALU_OPSEL tx_alu_opsel_2;
+	enum AL_ETH_TX_GCP_ALU_OPSEL tx_alu_opsel_3;
+	enum AL_ETH_TX_GCP_ALU_OPSEL tx_alu_opsel_4;
+	uint32_t gcp_mask[6];
+	uint32_t crc_init;
+	uint8_t gcp_table_res:7;
+	uint16_t alu_val:9;
+};
+
+/** Rx Generic crc parameters table entry */
+
+struct al_eth_rx_gcp_table_entry {
+	uint8_t poly_sel:1;
+	uint8_t crc32_bit_comp:1;
+	uint8_t crc32_bit_swap:1;
+	uint8_t crc32_byte_swap:1;
+	uint8_t data_bit_swap:1;
+	uint8_t data_byte_swap:1;
+	uint8_t trail_size:4;
+	uint8_t head_size:8;
+	uint8_t head_calc:1;
+	uint8_t mask_polarity:1;
+	enum AL_ETH_ALU_OPCODE rx_alu_opcode_1;
+	enum AL_ETH_ALU_OPCODE rx_alu_opcode_2;
+	enum AL_ETH_ALU_OPCODE rx_alu_opcode_3;
+	enum AL_ETH_RX_GCP_ALU_OPSEL rx_alu_opsel_1;
+	enum AL_ETH_RX_GCP_ALU_OPSEL rx_alu_opsel_2;
+	enum AL_ETH_RX_GCP_ALU_OPSEL rx_alu_opsel_3;
+	enum AL_ETH_RX_GCP_ALU_OPSEL rx_alu_opsel_4;
+	uint32_t gcp_mask[6];
+	uint32_t crc_init;
+	uint32_t gcp_table_res:27;
+	uint16_t alu_val:9;
+};
+
+/** Tx per_protocol_number crc & l3_checksum & l4_checksum command table entry */
+
+struct al_eth_tx_crc_chksum_replace_cmd_for_protocol_num_entry {
+	al_bool crc_en_00; /* from Tx_buffer_descriptor: enable_l4_checksum is 0, enable_l3_checksum is 0 */
+	al_bool crc_en_01; /* from Tx_buffer_descriptor: enable_l4_checksum is 0, enable_l3_checksum is 1 */
+	al_bool crc_en_10; /* from Tx_buffer_descriptor: enable_l4_checksum is 1, enable_l3_checksum is 0 */
+	al_bool crc_en_11; /* from Tx_buffer_descriptor: enable_l4_checksum is 1, enable_l3_checksum is 1 */
+	al_bool l4_csum_en_00; /* from Tx_buffer_descriptor: enable_l4_checksum is 0, enable_l3_checksum is 0 */
+	al_bool l4_csum_en_01; /* from Tx_buffer_descriptor: enable_l4_checksum is 0, enable_l3_checksum is 1 */
+	al_bool l4_csum_en_10; /* from Tx_buffer_descriptor: enable_l4_checksum is 1, enable_l3_checksum is 0 */
+	al_bool l4_csum_en_11; /* from Tx_buffer_descriptor: enable_l4_checksum is 1, enable_l3_checksum is 1 */
+	al_bool l3_csum_en_00; /* from Tx_buffer_descriptor: enable_l4_checksum is 0, enable_l3_checksum is 0 */
+	al_bool l3_csum_en_01; /* from Tx_buffer_descriptor: enable_l4_checksum is 0, enable_l3_checksum is 1 */
+	al_bool l3_csum_en_10; /* from Tx_buffer_descriptor: enable_l4_checksum is 1, enable_l3_checksum is 0 */
+	al_bool l3_csum_en_11; /* from Tx_buffer_descriptor: enable_l4_checksum is 1, enable_l3_checksum is 1 */
+};
+
+/**
+ * Configure tx_generic_crc_entry
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the entry index
+ * @param tx_gpd_entry entry data for the Tx protocol detect Cam compare table
+ * @param tx_gcp_entry entry data for the Tx Generic crc parameters table
+ * @param tx_replace_entry entry data for the Tx crc/checksum replace command table
+ *
+ * @return 0 on success. otherwise on failure.
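+ *
+ * Example (an illustrative sketch; real deployments program vendor-provided
+ * table contents, the entries below are left zero-initialized for brevity):
+ * @code
+ *	struct al_eth_tx_gpd_cam_entry gpd = { 0 };
+ *	struct al_eth_tx_gcp_table_entry gcp = { 0 };
+ *	struct al_eth_tx_crc_chksum_replace_cmd_for_protocol_num_entry rep = { 0 };
+ *
+ *	al_eth_generic_crc_init(adapter);
+ *	al_eth_tx_generic_crc_entry_set(adapter, 0, &gpd, &gcp, &rep);
+ * @endcode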
+ *
+ */
+int al_eth_tx_generic_crc_entry_set(struct al_hal_eth_adapter *adapter, uint32_t idx,
+		struct al_eth_tx_gpd_cam_entry *tx_gpd_entry,
+		struct al_eth_tx_gcp_table_entry *tx_gcp_entry,
+		struct al_eth_tx_crc_chksum_replace_cmd_for_protocol_num_entry *tx_replace_entry);
+
+/**
+ * Configure rx_generic_crc_entry
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the entry index
+ * @param rx_gpd_entry entry data for the Rx protocol detect Cam compare table
+ * @param rx_gcp_entry entry data for the Rx Generic crc parameters table
+ *
+ * @return 0 on success. otherwise on failure.
+ *
+ */
+int al_eth_rx_generic_crc_entry_set(struct al_hal_eth_adapter *adapter, uint32_t idx,
+		struct al_eth_rx_gpd_cam_entry *rx_gpd_entry,
+		struct al_eth_rx_gcp_table_entry *rx_gcp_entry);
+
+/**
+ * Configure generic_crc
+ *
+ * @param adapter pointer to the private structure
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_generic_crc_init(struct al_hal_eth_adapter *adapter);
+
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+#endif		/* __AL_HAL_ETH_H__ */
+/** @} end of Ethernet group */
diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_ec_regs.h b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_ec_regs.h
new file mode 100644
index 00000000000000..546dcdd3ca914f
--- /dev/null
+++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_ec_regs.h
@@ -0,0 +1,3461 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+  DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+  ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+  (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +*******************************************************************************/ + +/** + * @{ + * @file al_hal_eth_ec_regs.h + * + * @brief Ethernet controller registers + * + */ + +#ifndef __AL_HAL_EC_REG_H +#define __AL_HAL_EC_REG_H + +#ifdef __cplusplus +extern "C" { +#endif +/* +* Unit Registers +*/ + + + +struct al_ec_gen { + /* [0x0] Ethernet controller Version */ + uint32_t version; + /* [0x4] Enable modules operation. */ + uint32_t en; + /* [0x8] Enable FIFO operation on the EC side. */ + uint32_t fifo_en; + /* [0xc] General L2 configuration for the Ethernet controlle ... */ + uint32_t l2; + /* [0x10] Configure protocol index values */ + uint32_t cfg_i; + /* [0x14] Configure protocol index values (extended protocols ... */ + uint32_t cfg_i_ext; + /* [0x18] Enable modules operation (extended operations). */ + uint32_t en_ext; + uint32_t rsrvd[9]; +}; +struct al_ec_mac { + /* [0x0] General configuration of the MAC side of the Ethern ... */ + uint32_t gen; + /* [0x4] Minimum packet size */ + uint32_t min_pkt; + /* [0x8] Maximum packet size */ + uint32_t max_pkt; + uint32_t rsrvd[13]; +}; +struct al_ec_rxf { + /* [0x0] Rx FIFO input controller configuration 1 */ + uint32_t cfg_1; + /* [0x4] Rx FIFO input controller configuration 2 */ + uint32_t cfg_2; + /* [0x8] Threshold to start reading packet from the Rx FIFO */ + uint32_t rd_fifo; + /* [0xc] Threshold to stop writing packet to the Rx FIFO */ + uint32_t wr_fifo; + /* [0x10] Threshold to stop writing packet to the loopback FI ... */ + uint32_t lb_fifo; + /* [0x14] Rx FIFO input controller loopback FIFO configuratio ... */ + uint32_t cfg_lb; + /* [0x18] Configuration for dropping packet at the FIFO outpu ... */ + uint32_t out_drop; + uint32_t rsrvd[25]; +}; +struct al_ec_epe { + /* [0x0] Ethernet parsing engine configuration 1 */ + uint32_t parse_cfg; + /* [0x4] Protocol index action table address */ + uint32_t act_table_addr; + /* [0x8] Protocol index action table data */ + uint32_t act_table_data_1; + /* [0xc] Protocol index action table data */ + uint32_t act_table_data_2; + /* [0x10] Protocol index action table data */ + uint32_t act_table_data_3; + /* [0x14] Protocol index action table data */ + uint32_t act_table_data_4; + /* [0x18] Protocol index action table data */ + uint32_t act_table_data_5; + /* [0x1c] Protocol index action table data */ + uint32_t act_table_data_6; + /* [0x20] Input result vector, default values for parser inpu ... 
*/ + uint32_t res_def; + /* [0x24] Result input vector selection */ + uint32_t res_in; + uint32_t rsrvd[6]; +}; +struct al_ec_epe_res { + /* [0x0] Parser result vector pointer */ + uint32_t p1; + /* [0x4] Parser result vector pointer */ + uint32_t p2; + /* [0x8] Parser result vector pointer */ + uint32_t p3; + /* [0xc] Parser result vector pointer */ + uint32_t p4; + /* [0x10] Parser result vector pointer */ + uint32_t p5; + /* [0x14] Parser result vector pointer */ + uint32_t p6; + /* [0x18] Parser result vector pointer */ + uint32_t p7; + /* [0x1c] Parser result vector pointer */ + uint32_t p8; + /* [0x20] Parser result vector pointer */ + uint32_t p9; + /* [0x24] Parser result vector pointer */ + uint32_t p10; + /* [0x28] Parser result vector pointer */ + uint32_t p11; + /* [0x2c] Parser result vector pointer */ + uint32_t p12; + /* [0x30] Parser result vector pointer */ + uint32_t p13; + /* [0x34] Parser result vector pointer */ + uint32_t p14; + /* [0x38] Parser result vector pointer */ + uint32_t p15; + /* [0x3c] Parser result vector pointer */ + uint32_t p16; + /* [0x40] Parser result vector pointer */ + uint32_t p17; + /* [0x44] Parser result vector pointer */ + uint32_t p18; + /* [0x48] Parser result vector pointer */ + uint32_t p19; + /* [0x4c] Parser result vector pointer */ + uint32_t p20; + uint32_t rsrvd[12]; +}; +struct al_ec_epe_h { + /* [0x0] Header length, support for header length table for ... */ + uint32_t hdr_len; +}; +struct al_ec_epe_p { + /* [0x0] Data for comparison */ + uint32_t comp_data; + /* [0x4] Mask for comparison */ + uint32_t comp_mask; + /* [0x8] Compare control */ + uint32_t comp_ctrl; + uint32_t rsrvd[4]; +}; +struct al_ec_epe_a { + /* [0x0] Protocol index action register */ + uint32_t prot_act; +}; +struct al_ec_rfw { + /* [0x0] Tuple (4/2) Hash configuration */ + uint32_t thash_cfg_1; + /* [0x4] Tuple (4/2) Hash configuration */ + uint32_t thash_cfg_2; + /* [0x8] MAC Hash configuration */ + uint32_t mhash_cfg_1; + /* [0xc] MAC Hash configuration */ + uint32_t mhash_cfg_2; + /* [0x10] MAC Hash configuration */ + uint32_t hdr_split; + /* [0x14] Masking the errors described in register rxf_drop ... */ + uint32_t meta_err; + /* [0x18] Configuration for generating the MetaData for the R ... */ + uint32_t meta; + /* [0x1c] Configuration for generating the MetaData for the R ... 
*/ + uint32_t filter; + /* [0x20] 4 tupple hash table address */ + uint32_t thash_table_addr; + /* [0x24] 4 tupple hash table data */ + uint32_t thash_table_data; + /* [0x28] MAC hash table address */ + uint32_t mhash_table_addr; + /* [0x2c] MAC hash table data */ + uint32_t mhash_table_data; + /* [0x30] VLAN table address */ + uint32_t vid_table_addr; + /* [0x34] VLAN table data */ + uint32_t vid_table_data; + /* [0x38] VLAN p-bits table address */ + uint32_t pbits_table_addr; + /* [0x3c] VLAN p-bits table data */ + uint32_t pbits_table_data; + /* [0x40] DSCP table address */ + uint32_t dscp_table_addr; + /* [0x44] DSCP table data */ + uint32_t dscp_table_data; + /* [0x48] TC table address */ + uint32_t tc_table_addr; + /* [0x4c] TC table data */ + uint32_t tc_table_data; + /* [0x50] Control table address */ + uint32_t ctrl_table_addr; + /* [0x54] Control table data */ + uint32_t ctrl_table_data; + /* [0x58] Forwarding output configuration */ + uint32_t out_cfg; + /* [0x5c] Flow steering mechanism, +Table address */ + uint32_t fsm_table_addr; + /* [0x60] Flow steering mechanism, +Table data */ + uint32_t fsm_table_data; + /* [0x64] Selection of data to be used in packet forwarding0 ... */ + uint32_t ctrl_sel; + /* [0x68] Default VLAN data, used for untagged packets */ + uint32_t default_vlan; + /* [0x6c] Default HASH output values */ + uint32_t default_hash; + /* [0x70] Default override values, if a packet was filtered b ... */ + uint32_t default_or; + /* [0x74] Latched information when a drop condition occurred */ + uint32_t drop_latch; + /* [0x78] Check sum calculation configuration */ + uint32_t checksum; + /* [0x7c] LRO offload engine configuration register */ + uint32_t lro_cfg_1; + /* [0x80] LRO offload engine Check rules configurations for I ... */ + uint32_t lro_check_ipv4; + /* [0x84] LRO offload engine IPv4 values configuration */ + uint32_t lro_ipv4; + /* [0x88] LRO offload engine Check rules configurations for I ... */ + uint32_t lro_check_ipv6; + /* [0x8c] LRO offload engine IPv6 values configuration */ + uint32_t lro_ipv6; + /* [0x90] LRO offload engine Check rules configurations for T ... */ + uint32_t lro_check_tcp; + /* [0x94] LRO offload engine IPv6 values configuration */ + uint32_t lro_tcp; + /* [0x98] LRO offload engine Check rules configurations for U ... */ + uint32_t lro_check_udp; + /* [0x9c] LRO offload engine Check rules configurations for U ... */ + uint32_t lro_check_l2; + /* [0xa0] LRO offload engine Check rules configurations for U ... */ + uint32_t lro_check_gen; + /* [0xa4] Rules for storing packet information into the cache ... */ + uint32_t lro_store; + /* [0xa8] VLAN table default */ + uint32_t vid_table_def; + /* [0xac] Control table default */ + uint32_t ctrl_table_def; + /* [0xb0] Additional configuration 0 */ + uint32_t cfg_a_0; + /* [0xb4] Tuple (4/2) Hash configuration (extended for RoCE a ... */ + uint32_t thash_cfg_3; + /* [0xb8] Tuple (4/2) Hash configuration , mask for the input ... */ + uint32_t thash_mask_outer_ipv6; + /* [0xbc] Tuple (4/2) Hash configuration , mask for the input ... */ + uint32_t thash_mask_outer; + /* [0xc0] Tuple (4/2) Hash configuration , mask for the input ... */ + uint32_t thash_mask_inner_ipv6; + /* [0xc4] Tuple (4/2) Hash configuration , mask for the input ... 
*/ + uint32_t thash_mask_inner; + uint32_t rsrvd[10]; +}; +struct al_ec_rfw_udma { + /* [0x0] Per UDMA default configuration */ + uint32_t def_cfg; +}; +struct al_ec_rfw_hash { + /* [0x0] key configuration (320 bits) */ + uint32_t key; +}; +struct al_ec_rfw_priority { + /* [0x0] Priority to queue mapping configuration */ + uint32_t queue; +}; +struct al_ec_rfw_default { + /* [0x0] Default forwarding configuration options */ + uint32_t opt_1; +}; +struct al_ec_fwd_mac { + /* [0x0] MAC address data [31:0] */ + uint32_t data_l; + /* [0x4] MAC address data [15:0] */ + uint32_t data_h; + /* [0x8] MAC address mask [31:0] */ + uint32_t mask_l; + /* [0xc] MAC address mask [15:0] */ + uint32_t mask_h; + /* [0x10] MAC compare control */ + uint32_t ctrl; +}; +struct al_ec_msw { + /* [0x0] Configuration for unicast packets */ + uint32_t uc; + /* [0x4] Configuration for multicast packets */ + uint32_t mc; + /* [0x8] Configuration for broadcast packets */ + uint32_t bc; + uint32_t rsrvd[3]; +}; +struct al_ec_tso { + /* [0x0] Input configuration */ + uint32_t in_cfg; + /* [0x4] MetaData default cache table address */ + uint32_t cache_table_addr; + /* [0x8] MetaData default cache table data */ + uint32_t cache_table_data_1; + /* [0xc] MetaData default cache table data */ + uint32_t cache_table_data_2; + /* [0x10] MetaData default cache table data */ + uint32_t cache_table_data_3; + /* [0x14] MetaData default cache table data */ + uint32_t cache_table_data_4; + /* [0x18] TCP control bit operation for first segment */ + uint32_t ctrl_first; + /* [0x1c] TCP control bit operation for middle segments */ + uint32_t ctrl_middle; + /* [0x20] TCP control bit operation for last segment */ + uint32_t ctrl_last; + /* [0x24] Additional TSO configurations */ + uint32_t cfg_add_0; + /* [0x28] TSO configuration for tunnelled packets */ + uint32_t cfg_tunnel; + uint32_t rsrvd[13]; +}; +struct al_ec_tso_sel { + /* [0x0] MSS value */ + uint32_t mss; +}; +struct al_ec_tpe { + /* [0x0] Parsing configuration */ + uint32_t parse; + uint32_t rsrvd[15]; +}; +struct al_ec_tpm_udma { + /* [0x0] Default VLAN data */ + uint32_t vlan_data; + /* [0x4] UDMA MAC SA information for spoofing */ + uint32_t mac_sa_1; + /* [0x8] UDMA MAC SA information for spoofing */ + uint32_t mac_sa_2; +}; +struct al_ec_tpm_sel { + /* [0x0] Ethertype values for VLAN modification */ + uint32_t etype; +}; +struct al_ec_tfw { + /* [0x0] Tx FIFO Wr configuration */ + uint32_t tx_wr_fifo; + /* [0x4] VLAN table address */ + uint32_t tx_vid_table_addr; + /* [0x8] VLAN table data */ + uint32_t tx_vid_table_data; + /* [0xc] Tx FIFO Rd configuration */ + uint32_t tx_rd_fifo; + /* [0x10] Tx FIFO Rd configuration, checksum insertion */ + uint32_t tx_checksum; + /* [0x14] Tx forwarding general configuration register */ + uint32_t tx_gen; + /* [0x18] Tx spoofing configuration */ + uint32_t tx_spf; + /* [0x1c] TX data FIFO status */ + uint32_t data_fifo; + /* [0x20] Tx control FIFO status */ + uint32_t ctrl_fifo; + /* [0x24] Tx header FIFO status */ + uint32_t hdr_fifo; + uint32_t rsrvd[14]; +}; +struct al_ec_tfw_udma { + /* [0x0] Default GMDA output bitmap for unicast packet */ + uint32_t uc_udma; + /* [0x4] Default GMDA output bitmap for multicast packet */ + uint32_t mc_udma; + /* [0x8] Default GMDA output bitmap for broadcast packet */ + uint32_t bc_udma; + /* [0xc] Tx spoofing configuration */ + uint32_t spf_cmd; + /* [0x10] Forwarding decision control */ + uint32_t fwd_dec; + uint32_t rsrvd; +}; +struct al_ec_tmi { + /* [0x0] Forward packets back to the Rx data 
path for local ... */ + uint32_t tx_cfg; + uint32_t rsrvd[3]; +}; +struct al_ec_efc { + /* [0x0] Mask of pause_on [7:0] for the Ethernet controller ... */ + uint32_t ec_pause; + /* [0x4] Mask of Ethernet controller Almost Full indication ... */ + uint32_t ec_xoff; + /* [0x8] Mask for generating XON indication pulse */ + uint32_t xon; + /* [0xc] Mask for generating GPIO output XOFF indication fro ... */ + uint32_t gpio; + /* [0x10] Rx FIFO threshold for generating the Almost Full in ... */ + uint32_t rx_fifo_af; + /* [0x14] Rx FIFO threshold for generating the Almost Full in ... */ + uint32_t rx_fifo_hyst; + /* [0x18] Rx FIFO threshold for generating the Almost Full in ... */ + uint32_t stat; + /* [0x1c] XOFF timer for the 1G MACSets the interval (in SB_C ... */ + uint32_t xoff_timer_1g; + /* [0x20] PFC force flow control generation */ + uint32_t ec_pfc; + uint32_t rsrvd[3]; +}; +struct al_ec_fc_udma { + /* [0x0] Mask of "pause_on" [0] for all queues */ + uint32_t q_pause_0; + /* [0x4] Mask of "pause_on" [1] for all queues */ + uint32_t q_pause_1; + /* [0x8] Mask of "pause_on" [2] for all queues */ + uint32_t q_pause_2; + /* [0xc] Mask of "pause_on" [3] for all queues */ + uint32_t q_pause_3; + /* [0x10] Mask of "pause_on" [4] for all queues */ + uint32_t q_pause_4; + /* [0x14] Mask of "pause_on" [5] for all queues */ + uint32_t q_pause_5; + /* [0x18] Mask of "pause_on" [6] for all queues */ + uint32_t q_pause_6; + /* [0x1c] Mask of "pause_on" [7] for all queues */ + uint32_t q_pause_7; + /* [0x20] Mask of external GPIO input pause [0] for all queue ... */ + uint32_t q_gpio_0; + /* [0x24] Mask of external GPIO input pause [1] for all queue ... */ + uint32_t q_gpio_1; + /* [0x28] Mask of external GPIO input pause [2] for all queue ... */ + uint32_t q_gpio_2; + /* [0x2c] Mask of external GPIO input pause [3] for all queue ... */ + uint32_t q_gpio_3; + /* [0x30] Mask of external GPIO input [4] for all queues */ + uint32_t q_gpio_4; + /* [0x34] Mask of external GPIO input [5] for all queues */ + uint32_t q_gpio_5; + /* [0x38] Mask of external GPIO input [6] for all queues */ + uint32_t q_gpio_6; + /* [0x3c] Mask of external GPIO input [7] for all queues */ + uint32_t q_gpio_7; + /* [0x40] Mask of "pause_on" [7:0] for the UDMA stream inter ... */ + uint32_t s_pause; + /* [0x44] Mask of Rx Almost Full indication for generating XO ... */ + uint32_t q_xoff_0; + /* [0x48] Mask of Rx Almost Full indication for generating XO ... */ + uint32_t q_xoff_1; + /* [0x4c] Mask of Rx Almost Full indication for generating XO ... */ + uint32_t q_xoff_2; + /* [0x50] Mask of Rx Almost Full indication for generating XO ... */ + uint32_t q_xoff_3; + /* [0x54] Mask of Rx Almost Full indication for generating XO ... */ + uint32_t q_xoff_4; + /* [0x58] Mask of Rx Almost Full indication for generating XO ... */ + uint32_t q_xoff_5; + /* [0x5c] Mask of Rx Almost Full indication for generating XO ... */ + uint32_t q_xoff_6; + /* [0x60] Mask of Rx Almost Full indication for generating XO ... */ + uint32_t q_xoff_7; + uint32_t rsrvd[7]; +}; +struct al_ec_tpg_rpa_res { + /* [0x0] NOT used */ + uint32_t not_used; + uint32_t rsrvd[63]; +}; +struct al_ec_eee { + /* [0x0] EEE configuration */ + uint32_t cfg_e; + /* [0x4] Number of clocks to get into EEE mode. */ + uint32_t pre_cnt; + /* [0x8] Number of clocks to stop MAC EEE mode after getting ... */ + uint32_t post_cnt; + /* [0xc] Number of clocks to stop the Tx MAC interface after ... 
*/ + uint32_t stop_cnt; + /* [0x10] EEE status */ + uint32_t stat_eee; + uint32_t rsrvd[59]; +}; +struct al_ec_stat { + /* [0x0] Rx Frequency adjust FIFO input packets */ + uint32_t faf_in_rx_pkt; + /* [0x4] Rx Frequency adjust FIFO input short error packets */ + uint32_t faf_in_rx_short; + /* [0x8] Rx Frequency adjust FIFO input long error packets */ + uint32_t faf_in_rx_long; + /* [0xc] Rx Frequency adjust FIFO output packets */ + uint32_t faf_out_rx_pkt; + /* [0x10] Rx Frequency adjust FIFO output short error packets ... */ + uint32_t faf_out_rx_short; + /* [0x14] Rx Frequency adjust FIFO output long error packets */ + uint32_t faf_out_rx_long; + /* [0x18] Rx Frequency adjust FIFO output drop packets */ + uint32_t faf_out_drop; + /* [0x1c] Number of packets written into the Rx FIFO (without ... */ + uint32_t rxf_in_rx_pkt; + /* [0x20] Number of error packets written into the Rx FIFO (w ... */ + uint32_t rxf_in_fifo_err; + /* [0x24] Number of packets written into the loopback FIFO (w ... */ + uint32_t lbf_in_rx_pkt; + /* [0x28] Number of error packets written into the loopback F ... */ + uint32_t lbf_in_fifo_err; + /* [0x2c] Number of packets read from Rx FIFO 1 */ + uint32_t rxf_out_rx_1_pkt; + /* [0x30] Number of packets read from Rx FIFO 2 (loopback FIF ... */ + uint32_t rxf_out_rx_2_pkt; + /* [0x34] Rx FIFO output drop packets from FIFO 1 */ + uint32_t rxf_out_drop_1_pkt; + /* [0x38] Rx FIFO output drop packets from FIFO 2 (loopback) */ + uint32_t rxf_out_drop_2_pkt; + /* [0x3c] Rx Parser 1, input packet counter */ + uint32_t rpe_1_in_rx_pkt; + /* [0x40] Rx Parser 1, output packet counter */ + uint32_t rpe_1_out_rx_pkt; + /* [0x44] Rx Parser 2, input packet counter */ + uint32_t rpe_2_in_rx_pkt; + /* [0x48] Rx Parser 2, output packet counter */ + uint32_t rpe_2_out_rx_pkt; + /* [0x4c] Rx Parser 3 (MACsec), input packet counter */ + uint32_t rpe_3_in_rx_pkt; + /* [0x50] Rx Parser 3 (MACsec), output packet counter */ + uint32_t rpe_3_out_rx_pkt; + /* [0x54] Tx parser, input packet counter */ + uint32_t tpe_in_tx_pkt; + /* [0x58] Tx parser, output packet counter */ + uint32_t tpe_out_tx_pkt; + /* [0x5c] Tx packet modification, input packet counter */ + uint32_t tpm_tx_pkt; + /* [0x60] Tx forwarding input packet counter */ + uint32_t tfw_in_tx_pkt; + /* [0x64] Tx forwarding input packet counter */ + uint32_t tfw_out_tx_pkt; + /* [0x68] Rx forwarding input packet counter */ + uint32_t rfw_in_rx_pkt; + /* [0x6c] Rx Forwarding, packet with VLAN command drop indica ... */ + uint32_t rfw_in_vlan_drop; + /* [0x70] Rx Forwarding, packets with parse drop indication */ + uint32_t rfw_in_parse_drop; + /* [0x74] Rx Forwarding, multicast packets */ + uint32_t rfw_in_mc; + /* [0x78] Rx Forwarding, broadcast packets */ + uint32_t rfw_in_bc; + /* [0x7c] Rx Forwarding, tagged packets */ + uint32_t rfw_in_vlan_exist; + /* [0x80] Rx Forwarding, untagged packets */ + uint32_t rfw_in_vlan_nexist; + /* [0x84] Rx Forwarding, packets with MAC address drop indica ... */ + uint32_t rfw_in_mac_drop; + /* [0x88] Rx Forwarding, packets with undetected MAC address */ + uint32_t rfw_in_mac_ndet_drop; + /* [0x8c] Rx Forwarding, packets with drop indication from th ... */ + uint32_t rfw_in_ctrl_drop; + /* [0x90] Rx Forwarding, packets with L3_protocol_index drop ... */ + uint32_t rfw_in_prot_i_drop; + /* [0x94] EEE, number of times the system went into EEE state ... 
*/ + uint32_t eee_in; + uint32_t rsrvd[90]; +}; +struct al_ec_stat_udma { + /* [0x0] Rx forwarding output packet counter */ + uint32_t rfw_out_rx_pkt; + /* [0x4] Rx forwarding output drop packet counter */ + uint32_t rfw_out_drop; + /* [0x8] Multi-stream write, number of Rx packets */ + uint32_t msw_in_rx_pkt; + /* [0xc] Multi-stream write, number of dropped packets at SO ... */ + uint32_t msw_drop_q_full; + /* [0x10] Multi-stream write, number of dropped packets at SO ... */ + uint32_t msw_drop_sop; + /* [0x14] Multi-stream write, number of dropped packets at EO ... */ + uint32_t msw_drop_eop; + /* [0x18] Multi-stream write, number of packets written to th ... */ + uint32_t msw_wr_eop; + /* [0x1c] Multi-stream write, number of packets read from the ... */ + uint32_t msw_out_rx_pkt; + /* [0x20] Number of transmitted packets without TSO enabled */ + uint32_t tso_no_tso_pkt; + /* [0x24] Number of transmitted packets with TSO enabled */ + uint32_t tso_tso_pkt; + /* [0x28] Number of TSO segments that were generated */ + uint32_t tso_seg_pkt; + /* [0x2c] Number of TSO segments that required padding */ + uint32_t tso_pad_pkt; + /* [0x30] Tx Packet modification, MAC SA spoof error */ + uint32_t tpm_tx_spoof; + /* [0x34] Tx MAC interface, input packet counter */ + uint32_t tmi_in_tx_pkt; + /* [0x38] Tx MAC interface, number of packets forwarded to th ... */ + uint32_t tmi_out_to_mac; + /* [0x3c] Tx MAC interface, number of packets forwarded to th ... */ + uint32_t tmi_out_to_rx; + /* [0x40] Tx MAC interface, number of transmitted bytes */ + uint32_t tx_q0_bytes; + /* [0x44] Tx MAC interface, number of transmitted bytes */ + uint32_t tx_q1_bytes; + /* [0x48] Tx MAC interface, number of transmitted bytes */ + uint32_t tx_q2_bytes; + /* [0x4c] Tx MAC interface, number of transmitted bytes */ + uint32_t tx_q3_bytes; + /* [0x50] Tx MAC interface, number of transmitted packets */ + uint32_t tx_q0_pkts; + /* [0x54] Tx MAC interface, number of transmitted packets */ + uint32_t tx_q1_pkts; + /* [0x58] Tx MAC interface, number of transmitted packets */ + uint32_t tx_q2_pkts; + /* [0x5c] Tx MAC interface, number of transmitted packets */ + uint32_t tx_q3_pkts; + uint32_t rsrvd[40]; +}; +struct al_ec_msp { + /* [0x0] Ethernet parsing engine configuration 1 */ + uint32_t p_parse_cfg; + /* [0x4] Protocol index action table address */ + uint32_t p_act_table_addr; + /* [0x8] Protocol index action table data */ + uint32_t p_act_table_data_1; + /* [0xc] Protocol index action table data */ + uint32_t p_act_table_data_2; + /* [0x10] Protocol index action table data */ + uint32_t p_act_table_data_3; + /* [0x14] Protocol index action table data */ + uint32_t p_act_table_data_4; + /* [0x18] Protocol index action table data */ + uint32_t p_act_table_data_5; + /* [0x1c] Protocol index action table data */ + uint32_t p_act_table_data_6; + /* [0x20] Input result vector, default values for parser inpu ... */ + uint32_t p_res_def; + /* [0x24] Result input vector selection */ + uint32_t p_res_in; + uint32_t rsrvd[6]; +}; +struct al_ec_msp_p { + /* [0x0] Header length, support for header length table for ... 
*/ + uint32_t h_hdr_len; +}; +struct al_ec_msp_c { + /* [0x0] Data for comparison */ + uint32_t p_comp_data; + /* [0x4] Mask for comparison */ + uint32_t p_comp_mask; + /* [0x8] Compare control */ + uint32_t p_comp_ctrl; + uint32_t rsrvd[4]; +}; +struct al_ec_crce { + /* [0x0] RoCE CRC init value */ + uint32_t roce_crc_init; + /* [0x4] FCoE CRC init value */ + uint32_t fcoe_crc_init; + /* [0x8] Packet header / trailer size - bytes not included i ... */ + uint32_t pkt_cfg; + /* [0xc] Bit Byte Swap enable */ + uint32_t swap_en; + /* [0x10] RoCE mask data word 0 */ + uint32_t roce_word_0; + /* [0x14] RoCE mask data word 1 */ + uint32_t roce_word_1; + /* [0x18] RoCE mask data word 2 */ + uint32_t roce_word_2; + /* [0x1c] RoCE mask data word 3 */ + uint32_t roce_word_3; + /* [0x20] General CRC engine configuration */ + uint32_t gen_cfg; + uint32_t rsrvd[7]; +}; +struct al_ec_wol { + /* [0x0] WoL enable configuration,Packet forwarding and inte ... */ + uint32_t wol_en; + /* [0x4] Password for magic_password packet detection - bits ... */ + uint32_t magic_pswd_l; + /* [0x8] Password for magic+password packet detection - 47: ... */ + uint32_t magic_pswd_h; + /* [0xc] Configured L3 Destination IP address for WoL IPv6 p ... */ + uint32_t ipv6_dip_word0; + /* [0x10] Configured L3 Destination IP address for WoL IPv6 p ... */ + uint32_t ipv6_dip_word1; + /* [0x14] Configured L3 Destination IP address for WoL IPv6 p ... */ + uint32_t ipv6_dip_word2; + /* [0x18] Configured L3 Destination IP address for WoL IPv6 p ... */ + uint32_t ipv6_dip_word3; + /* [0x1c] Configured L3 Destination IP address for WoL IPv4 p ... */ + uint32_t ipv4_dip; + /* [0x20] Configured EtherType for WoL EtherType_da/EtherType ... */ + uint32_t ethertype; + uint32_t rsrvd[7]; +}; +struct al_ec_pth { + /* [0x0] System time counter (Time of Day) */ + uint32_t system_time_seconds; + /* [0x4] System time subseconds in a second (MSBs) */ + uint32_t system_time_subseconds_msb; + /* [0x8] System time subseconds in a second (LSBs) */ + uint32_t system_time_subseconds_lsb; + /* [0xc] Clock period in femtoseconds (MSB) */ + uint32_t clock_period_msb; + /* [0x10] Clock period in femtoseconds (LSB) */ + uint32_t clock_period_lsb; + /* [0x14] Control register for internal updates to the system ... */ + uint32_t int_update_ctrl; + /* [0x18] Value to update system_time_seconds with */ + uint32_t int_update_seconds; + /* [0x1c] Value to update system_time_subseconds_msb with */ + uint32_t int_update_subseconds_msb; + /* [0x20] Value to update system_time_subseconds_lsb with */ + uint32_t int_update_subseconds_lsb; + /* [0x24] Control register for external updates to the system ... */ + uint32_t ext_update_ctrl; + /* [0x28] Value to update system_time_seconds with */ + uint32_t ext_update_seconds; + /* [0x2c] Value to update system_time_subseconds_msb with */ + uint32_t ext_update_subseconds_msb; + /* [0x30] Value to update system_time_subseconds_lsb with */ + uint32_t ext_update_subseconds_lsb; + /* [0x34] This value represents the APB transaction delay fro ... */ + uint32_t read_compensation_subseconds_msb; + /* [0x38] This value represents the APB transaction delay fro ... */ + uint32_t read_compensation_subseconds_lsb; + /* [0x3c] This value is used for two purposes:1 */ + uint32_t int_write_compensation_subseconds_msb; + /* [0x40] This value is used for two purposes:1 */ + uint32_t int_write_compensation_subseconds_lsb; + /* [0x44] This value represents the number of cycles it for a ... 
*/ + uint32_t ext_write_compensation_subseconds_msb; + /* [0x48] This value represents the number of cycles it for a ... */ + uint32_t ext_write_compensation_subseconds_lsb; + /* [0x4c] Value to be added to system_time before transferrin ... */ + uint32_t sync_compensation_subseconds_msb; + /* [0x50] Value to be added to system_time before transferrin ... */ + uint32_t sync_compensation_subseconds_lsb; + uint32_t rsrvd[11]; +}; +struct al_ec_pth_egress { + /* [0x0] Control register for egress trigger #k */ + uint32_t trigger_ctrl; + /* [0x4] threshold for next egress trigger (#k) - secondsWri ... */ + uint32_t trigger_seconds; + /* [0x8] Threshold for next egress trigger (#k) - subseconds ... */ + uint32_t trigger_subseconds_msb; + /* [0xc] threshold for next egress trigger (#k) - subseconds ... */ + uint32_t trigger_subseconds_lsb; + /* [0x10] External output pulse width (subseconds_msb)(Atomic ... */ + uint32_t pulse_width_subseconds_msb; + /* [0x14] External output pulse width (subseconds_lsb)(Atomic ... */ + uint32_t pulse_width_subseconds_lsb; + uint32_t rsrvd[2]; +}; +struct al_ec_pth_db { + /* [0x0] timestamp[k], in resolution of 2^18 femtosec =~ 0 */ + uint32_t ts; + /* [0x4] Timestamp entry is valid */ + uint32_t qual; + uint32_t rsrvd[4]; +}; +struct al_ec_roce { + /* [0x0] RoCE configuration */ + uint32_t roce_gen_cfg_1; + /* [0x4] READ_RESPONSE state error */ + uint32_t rr_err; + /* [0x8] READ_RESPONSE qualification error mask */ + uint32_t rr_qual; + /* [0xc] READ_RESPONSE packet counter mask */ + uint32_t rr_err_cnt; + /* [0x10] READ_RESPONSE interrupt generation error mask */ + uint32_t rr_err_int; + /* [0x14] WRITE REQUEST state error */ + uint32_t wr_err; + /* [0x18] WRITE REQUEST qualification error mask */ + uint32_t wr_qual; + /* [0x1c] WRITE REQUEST packet counter mask */ + uint32_t wr_err_cnt; + /* [0x20] WRITE REQUEST interrupt generation error mask */ + uint32_t wr_err_int; + /* [0x24] RoCE configuration */ + uint32_t roce_gen_en_1; + /* [0x28] RoCE GRH header fields configuration */ + uint32_t roce_hdr; + /* [0x2c] READ RESPONSE ERROR packets */ + uint32_t rr_error; + /* [0x30] READ RESPONSE correct packets */ + uint32_t rr_ok; + /* [0x34] WRITE REQUEST ERROR packets */ + uint32_t wr_error; + /* [0x38] WRITE REQUEST correct packets */ + uint32_t wr_ok; + uint32_t rsrvd[49]; +}; +struct al_ec_roce_qp_db { + /* [0x0] QP value for comparing to the received DEST_QP */ + uint32_t qp_val; + /* [0x4] RoCE configuration */ + uint32_t roce_qp_cfg_1; + /* [0x8] RoCE configuration */ + uint32_t roce_qp_cfg_2; + /* [0xc] READ_RESPONSE Control */ + uint32_t rr_ctrl; + /* [0x10] WRITE REQUEST Control */ + uint32_t wr_ctrl; + /* [0x14] WRITE REQUEST PSN control */ + uint32_t wr_psn; + /* [0x18] WRITE REQUEST, LAST PSN value that the SW sampled */ + uint32_t wr_psn_last_sw; + /* [0x1c] WRITE REQUEST, LAST PSN value that the HW sampled ( ... */ + uint32_t wr_psn_last_hw; + /* [0x20] WRITE REQUEST, LAST PSN value that the HW sampled ( ... 
*/
+	uint32_t wr_psn_hw;
+	/* [0x24] Address translation offset high */
+	uint32_t addr_offset_h;
+	/* [0x28] Address translation offset low */
+	uint32_t addr_offset_l;
+	/* [0x2c] Address translation mask high */
+	uint32_t addr_mask_h;
+	/* [0x30] Address translation mask low */
+	uint32_t addr_mask_l;
+	/* [0x34] PKEY */
+	uint32_t p_key;
+	/* [0x38] RKEY */
+	uint32_t r_key;
+	/* [0x3c] RoCE status information */
+	uint32_t roce_status;
+	/* [0x40] READ REQUEST FIFO */
+	uint32_t rr_fifo_1;
+	/* [0x44] READ REQUEST FIFO */
+	uint32_t rr_fifo_2;
+	/* [0x48] READ REQUEST FIFO */
+	uint32_t rr_fifo_3;
+	/* [0x4c] READ REQUEST FIFO */
+	uint32_t rr_fifo_4;
+	/* [0x50] READ REQUEST FIFO status */
+	uint32_t rr_fifo_status;
+	/* [0x54] READ REQUEST FIFO control */
+	uint32_t rr_fifo_ctrl;
+};
+struct al_ec_gen_v3 {
+	/* [0x0] Bypass enable */
+	uint32_t bypass;
+	/* [0x4] Rx Completion descriptor */
+	uint32_t rx_comp_desc;
+	uint32_t rsrvd[2];
+};
+struct al_ec_tfw_v3 {
+	/* [0x0] Generic protocol detect Cam compare table address */
+	uint32_t tx_gpd_cam_addr;
+	/* [0x4] Tx Generic protocol detect Cam compare data_1 (low) ... */
+	uint32_t tx_gpd_cam_data_1;
+	/* [0x8] Tx Generic protocol detect Cam compare data_2 (high ... */
+	uint32_t tx_gpd_cam_data_2;
+	/* [0xc] Tx Generic protocol detect Cam compare mask_1 (low) ... */
+	uint32_t tx_gpd_cam_mask_1;
+	/* [0x10] Tx Generic protocol detect Cam compare mask_2 (high ... */
+	uint32_t tx_gpd_cam_mask_2;
+	/* [0x14] Tx Generic protocol detect Cam compare control */
+	uint32_t tx_gpd_cam_ctrl;
+	/* [0x18] Tx Generic crc parameters legacy */
+	uint32_t tx_gcp_legacy;
+	/* [0x1c] Tx Generic crc parameters table address */
+	uint32_t tx_gcp_table_addr;
+	/* [0x20] Tx Generic crc parameters table general */
+	uint32_t tx_gcp_table_gen;
+	/* [0x24] Tx Generic crc parameters table mask word 1 */
+	uint32_t tx_gcp_table_mask_1;
+	/* [0x28] Tx Generic crc parameters table mask word 2 */
+	uint32_t tx_gcp_table_mask_2;
+	/* [0x2c] Tx Generic crc parameters table mask word 3 */
+	uint32_t tx_gcp_table_mask_3;
+	/* [0x30] Tx Generic crc parameters table mask word 4 */
+	uint32_t tx_gcp_table_mask_4;
+	/* [0x34] Tx Generic crc parameters table mask word 5 */
+	uint32_t tx_gcp_table_mask_5;
+	/* [0x38] Tx Generic crc parameters table mask word 6 */
+	uint32_t tx_gcp_table_mask_6;
+	/* [0x3c] Tx Generic crc parameters table crc init */
+	uint32_t tx_gcp_table_crc_init;
+	/* [0x40] Tx Generic crc parameters table result configuration ... */
+	uint32_t tx_gcp_table_res;
+	/* [0x44] Tx Generic crc parameters table alu opcode */
+	uint32_t tx_gcp_table_alu_opcode;
+	/* [0x48] Tx Generic crc parameters table alu opsel */
+	uint32_t tx_gcp_table_alu_opsel;
+	/* [0x4c] Tx Generic crc parameters table alu constant value */
+	uint32_t tx_gcp_table_alu_val;
+	/* [0x50] Tx CRC/Checksum replace */
+	uint32_t crc_csum_replace;
+	/* [0x54] CRC/Checksum replace table address */
+	uint32_t crc_csum_replace_table_addr;
+	/* [0x58] CRC/Checksum replace table */
+	uint32_t crc_csum_replace_table;
+	uint32_t rsrvd;
+};
+struct al_ec_rfw_v3 {
+	/* [0x0] Rx Generic protocol detect Cam compare table addres ... */
+	uint32_t rx_gpd_cam_addr;
+	/* [0x4] Rx Generic protocol detect Cam compare data_1 (low) ... */
+	uint32_t rx_gpd_cam_data_1;
+	/* [0x8] Rx Generic protocol detect Cam compare data_2 (high ... */
+	uint32_t rx_gpd_cam_data_2;
+	/* [0xc] Rx Generic protocol detect Cam compare mask_1 (low) ... */
+	uint32_t rx_gpd_cam_mask_1;
+	/* [0x10] Rx Generic protocol detect Cam compare mask_2 (high ... */
+	uint32_t rx_gpd_cam_mask_2;
+	/* [0x14] Rx Generic protocol detect Cam compare control */
+	uint32_t rx_gpd_cam_ctrl;
+	/* [0x18] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p1;
+	/* [0x1c] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p2;
+	/* [0x20] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p3;
+	/* [0x24] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p4;
+	/* [0x28] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p5;
+	/* [0x2c] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p6;
+	/* [0x30] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p7;
+	/* [0x34] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p8;
+	/* [0x38] Rx Generic crc parameters legacy */
+	uint32_t rx_gcp_legacy;
+	/* [0x3c] Rx Generic crc parameters table address */
+	uint32_t rx_gcp_table_addr;
+	/* [0x40] Rx Generic crc parameters table general */
+	uint32_t rx_gcp_table_gen;
+	/* [0x44] Rx Generic crc parameters table mask word 1 */
+	uint32_t rx_gcp_table_mask_1;
+	/* [0x48] Rx Generic crc parameters table mask word 2 */
+	uint32_t rx_gcp_table_mask_2;
+	/* [0x4c] Rx Generic crc parameters table mask word 3 */
+	uint32_t rx_gcp_table_mask_3;
+	/* [0x50] Rx Generic crc parameters table mask word 4 */
+	uint32_t rx_gcp_table_mask_4;
+	/* [0x54] Rx Generic crc parameters table mask word 5 */
+	uint32_t rx_gcp_table_mask_5;
+	/* [0x58] Rx Generic crc parameters table mask word 6 */
+	uint32_t rx_gcp_table_mask_6;
+	/* [0x5c] Rx Generic crc parameters table crc init */
+	uint32_t rx_gcp_table_crc_init;
+	/* [0x60] Rx Generic crc parameters table result configuration ... */
+	uint32_t rx_gcp_table_res;
+	/* [0x64] Rx Generic crc parameters table alu opcode */
+	uint32_t rx_gcp_table_alu_opcode;
+	/* [0x68] Rx Generic crc parameters table alu opsel */
+	uint32_t rx_gcp_table_alu_opsel;
+	/* [0x6c] Rx Generic crc parameters table alu constant value ... */
+	uint32_t rx_gcp_table_alu_val;
+	/* [0x70] Generic crc engine parameters alu Parser result vect ... */
+	uint32_t rx_gcp_alu_p1;
+	/* [0x74] Generic crc engine parameters alu Parser result vec ... */
+	uint32_t rx_gcp_alu_p2;
+	/* [0x78] Header split control table address */
+	uint32_t hs_ctrl_table_addr;
+	/* [0x7c] Header split control table */
+	uint32_t hs_ctrl_table;
+	/* [0x80] Header split control alu opcode */
+	uint32_t hs_ctrl_table_alu_opcode;
+	/* [0x84] Header split control alu opsel */
+	uint32_t hs_ctrl_table_alu_opsel;
+	/* [0x88] Header split control alu constant value */
+	uint32_t hs_ctrl_table_alu_val;
+	/* [0x8c] Header split control configuration */
+	uint32_t hs_ctrl_cfg;
+	/* [0x90] Header split control alu Parser result vector point ... */
+	uint32_t hs_ctrl_alu_p1;
+	/* [0x94] Header split control alu Parser result vector point ...
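The *_table_addr / *_table_data registers above follow the usual indirect-table pattern: software writes the index first, then the data and mask words. A hedged sketch of loading one Rx generic-protocol-detect CAM entry (member names from al_ec_rfw_v3 above; the ctrl layout is not spelled out in this header, so it is passed through opaquely):

static void al_ec_rx_gpd_cam_set(struct al_ec_rfw_v3 __iomem *rfw,
				 uint32_t idx, uint64_t data, uint64_t mask,
				 uint32_t ctrl)
{
	writel(idx, &rfw->rx_gpd_cam_addr);	/* select the entry */
	writel((uint32_t)(data & 0xffffffff), &rfw->rx_gpd_cam_data_1);
	writel((uint32_t)(data >> 32), &rfw->rx_gpd_cam_data_2);
	writel((uint32_t)(mask & 0xffffffff), &rfw->rx_gpd_cam_mask_1);
	writel((uint32_t)(mask >> 32), &rfw->rx_gpd_cam_mask_2);
	writel(ctrl, &rfw->rx_gpd_cam_ctrl);	/* control/valid bits */
}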
*/ + uint32_t hs_ctrl_alu_p2; +}; +struct al_ec_crypto { + /* [0x0] Tx inline crypto configuration */ + uint32_t tx_config; + /* [0x4] Rx inline crypto configuration */ + uint32_t rx_config; + /* [0x8] inline XTS alpha [31:0] */ + uint32_t xts_alpha_1; + /* [0xc] inline XTS alpha [63:32] */ + uint32_t xts_alpha_2; + /* [0x10] inline XTS alpha [95:64] */ + uint32_t xts_alpha_3; + /* [0x14] inline XTS alpha [127:96] */ + uint32_t xts_alpha_4; + /* [0x18] inline XTS sector ID increment [31:0] */ + uint32_t xts_sector_id_1; + /* [0x1c] inline XTS sector ID increment [63:32] */ + uint32_t xts_sector_id_2; + /* [0x20] inline XTS sector ID increment [95:64] */ + uint32_t xts_sector_id_3; + /* [0x24] inline XTS sector ID increment [127:96] */ + uint32_t xts_sector_id_4; +}; + +struct al_ec_regs { + uint32_t rsrvd_0[32]; + struct al_ec_gen gen; /* [0x80] */ + struct al_ec_mac mac; /* [0xc0] */ + struct al_ec_rxf rxf; /* [0x100] */ + struct al_ec_epe epe[2]; /* [0x180] */ + struct al_ec_epe_res epe_res; /* [0x200] */ + struct al_ec_epe_h epe_h[32]; /* [0x280] */ + struct al_ec_epe_p epe_p[32]; /* [0x300] */ + struct al_ec_epe_a epe_a[32]; /* [0x680] */ + struct al_ec_rfw rfw; /* [0x700] */ + struct al_ec_rfw_udma rfw_udma[4]; /* [0x7f0] */ + struct al_ec_rfw_hash rfw_hash[10]; /* [0x800] */ + struct al_ec_rfw_priority rfw_priority[8]; /* [0x828] */ + struct al_ec_rfw_default rfw_default[8]; /* [0x848] */ + struct al_ec_fwd_mac fwd_mac[32]; /* [0x868] */ + struct al_ec_msw msw; /* [0xae8] */ + struct al_ec_tso tso; /* [0xb00] */ + struct al_ec_tso_sel tso_sel[8]; /* [0xb60] */ + struct al_ec_tpe tpe; /* [0xb80] */ + struct al_ec_tpm_udma tpm_udma[4]; /* [0xbc0] */ + struct al_ec_tpm_sel tpm_sel[4]; /* [0xbf0] */ + struct al_ec_tfw tfw; /* [0xc00] */ + struct al_ec_tfw_udma tfw_udma[4]; /* [0xc60] */ + struct al_ec_tmi tmi; /* [0xcc0] */ + struct al_ec_efc efc; /* [0xcd0] */ + struct al_ec_fc_udma fc_udma[4]; /* [0xd00] */ + struct al_ec_tpg_rpa_res tpg_rpa_res; /* [0xf00] */ + struct al_ec_eee eee; /* [0x1000] */ + struct al_ec_stat stat; /* [0x1100] */ + struct al_ec_stat_udma stat_udma[4]; /* [0x1300] */ + struct al_ec_msp msp; /* [0x1700] */ + struct al_ec_msp_p msp_p[32]; /* [0x1740] */ + struct al_ec_msp_c msp_c[32]; /* [0x17c0] */ + struct al_ec_crce crce; /* [0x1b40] */ + struct al_ec_wol wol; /* [0x1b80] */ + uint32_t rsrvd_1[80]; + struct al_ec_pth pth; /* [0x1d00] */ + struct al_ec_pth_egress pth_egress[8]; /* [0x1d80] */ + struct al_ec_pth_db pth_db[16]; /* [0x1e80] */ + struct al_ec_roce roce; /* [0x2000] */ + struct al_ec_roce_qp_db roce_qp_db[16]; /* [0x2100] */ + struct al_ec_gen_v3 gen_v3; /* [0x2680] */ + struct al_ec_tfw_v3 tfw_v3; /* [0x2690] */ + struct al_ec_rfw_v3 rfw_v3; /* [0x26f0] */ + uint32_t rsrvd_2[2]; + struct al_ec_crypto crypto; /* [0x2790] */ +}; + + +/* +* Registers Fields +*/ + + +/**** version register ****/ +/* Revision number (Minor) */ +#define EC_GEN_VERSION_RELEASE_NUM_MINOR_MASK 0x000000FF +#define EC_GEN_VERSION_RELEASE_NUM_MINOR_SHIFT 0 +/* Revision number (Major) */ +#define EC_GEN_VERSION_RELEASE_NUM_MAJOR_MASK 0x0000FF00 +#define EC_GEN_VERSION_RELEASE_NUM_MAJOR_SHIFT 8 +/* Day of release */ +#define EC_GEN_VERSION_DATE_DAY_MASK 0x001F0000 +#define EC_GEN_VERSION_DATE_DAY_SHIFT 16 +/* Month of release */ +#define EC_GEN_VERSION_DATA_MONTH_MASK 0x01E00000 +#define EC_GEN_VERSION_DATA_MONTH_SHIFT 21 +/* Year of release (starting from 2000) */ +#define EC_GEN_VERSION_DATE_YEAR_MASK 0x3E000000 +#define EC_GEN_VERSION_DATE_YEAR_SHIFT 25 +/* Reserved */ 
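The bracketed offsets in al_ec_regs are byte offsets from the EC base address, so the usual struct-overlay idiom applies: map the register window once and let the compiler compute offsets. A sketch of decoding the version register with the mask/shift pairs above (assumes al_ec_gen starts with a `version` member, which the EC_GEN_VERSION_* comments imply; readl() as in the kernel):

static inline unsigned int al_ec_rev_major(struct al_ec_regs __iomem *ec)
{
	uint32_t v = readl(&ec->gen.version);	/* `version` member assumed */

	return (v & EC_GEN_VERSION_RELEASE_NUM_MAJOR_MASK) >>
	       EC_GEN_VERSION_RELEASE_NUM_MAJOR_SHIFT;
}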
+#define EC_GEN_VERSION_RESERVED_MASK 0xC0000000 +#define EC_GEN_VERSION_RESERVED_SHIFT 30 + +/**** en register ****/ +/* Enable Frequency adjust FIFO input controller operation. */ +#define EC_GEN_EN_FAF_IN (1 << 0) +/* Enable Frequency adjust FIFO output controller operation. */ +#define EC_GEN_EN_FAF_OUT (1 << 1) +/* Enable Rx FIFO input controller 1 operation. */ +#define EC_GEN_EN_RXF_IN (1 << 2) +/* Enable Rx FIFO output controller operation. */ +#define EC_GEN_EN_RXF_OUT (1 << 3) +/* Enable Rx forwarding input controller operation. */ +#define EC_GEN_EN_RFW_IN (1 << 4) +/* Enable Rx forwarding output controller operation. */ +#define EC_GEN_EN_RFW_OUT (1 << 5) +/* Enable Rx multi-stream write controller operation. */ +#define EC_GEN_EN_MSW_IN (1 << 6) +/* Enable Rx first parsing engine output operation. */ +#define EC_GEN_EN_RPE_1_OUT (1 << 7) +/* Enable Rx first parsing engine input operation. */ +#define EC_GEN_EN_RPE_1_IN (1 << 8) +/* Enable Rx second parsing engine output operation. */ +#define EC_GEN_EN_RPE_2_OUT (1 << 9) +/* Enable Rx second parsing engine input operation. */ +#define EC_GEN_EN_RPE_2_IN (1 << 10) +/* Enable Rx MACsec parsing engine output operation. */ +#define EC_GEN_EN_RPE_3_OUT (1 << 11) +/* Enable Rx MACsec parsing engine input operation. */ +#define EC_GEN_EN_RPE_3_IN (1 << 12) +/* Enable Loopback FIFO input controller 1 operation. */ +#define EC_GEN_EN_LBF_IN (1 << 13) +/* Enable Rx packet analyzer operation. */ +#define EC_GEN_EN_RPA (1 << 14) + +#define EC_GEN_EN_RESERVED_15 (1 << 15) +/* Enable Tx stream interface operation. */ +#define EC_GEN_EN_TSO (1 << 16) +/* Enable Tx parser input controller operation. */ +#define EC_GEN_EN_TPE_IN (1 << 17) +/* Enable Tx parser output controller operation. */ +#define EC_GEN_EN_TPE_OUT (1 << 18) +/* Enable Tx packet modification operation. */ +#define EC_GEN_EN_TPM (1 << 19) +/* Enable Tx forwarding input controller operation. */ +#define EC_GEN_EN_TFW_IN (1 << 20) +/* Enable Tx forwarding output controller operation. */ +#define EC_GEN_EN_TFW_OUT (1 << 21) +/* Enable Tx MAC interface controller operation. */ +#define EC_GEN_EN_TMI (1 << 22) +/* Enable Tx packet generator operation. */ +#define EC_GEN_EN_TPG (1 << 23) + +#define EC_GEN_EN_RESERVED_31_MASK 0xFF000000 +#define EC_GEN_EN_RESERVED_31_SHIFT 24 + +/**** fifo_en register ****/ +/* Enable Frequency adjust FIFO operation (input). */ +#define EC_GEN_FIFO_EN_FAF_IN (1 << 0) +/* Enable Frequency adjust FIFO operation (output). */ +#define EC_GEN_FIFO_EN_FAF_OUT (1 << 1) +/* Enable Rx FIFO operation. */ +#define EC_GEN_FIFO_EN_RX_FIFO (1 << 2) +/* Enable Rx forwarding FIFO operation. */ +#define EC_GEN_FIFO_EN_RFW_FIFO (1 << 3) +/* Enable Rx multi-stream write FIFO operation */ +#define EC_GEN_FIFO_EN_MSW_FIFO (1 << 4) +/* Enable Rx first parser FIFO operation. */ +#define EC_GEN_FIFO_EN_RPE_1_FIFO (1 << 5) +/* Enable Rx second parser FIFO operation. */ +#define EC_GEN_FIFO_EN_RPE_2_FIFO (1 << 6) +/* Enable Rx MACsec parser FIFO operation. */ +#define EC_GEN_FIFO_EN_RPE_3_FIFO (1 << 7) +/* Enable Loopback FIFO operation. */ +#define EC_GEN_FIFO_EN_LB_FIFO (1 << 8) + +#define EC_GEN_FIFO_EN_RESERVED_15_9_MASK 0x0000FE00 +#define EC_GEN_FIFO_EN_RESERVED_15_9_SHIFT 9 +/* Enable Tx parser FIFO operation. */ +#define EC_GEN_FIFO_EN_TPE_FIFO (1 << 16) +/* Enable Tx forwarding FIFO operation. 
*/ +#define EC_GEN_FIFO_EN_TFW_FIFO (1 << 17) + +#define EC_GEN_FIFO_EN_RESERVED_31_18_MASK 0xFFFC0000 +#define EC_GEN_FIFO_EN_RESERVED_31_18_SHIFT 18 + +/**** l2 register ****/ +/* Size of a 802.3 Ethernet header (DA+SA) */ +#define EC_GEN_L2_SIZE_802_3_MASK 0x0000003F +#define EC_GEN_L2_SIZE_802_3_SHIFT 0 +/* Size of a 802.3 + MACsec 8 byte header */ +#define EC_GEN_L2_SIZE_802_3_MS_8_MASK 0x00003F00 +#define EC_GEN_L2_SIZE_802_3_MS_8_SHIFT 8 +/* Offset of the L2 header from the beginning of the packet. */ +#define EC_GEN_L2_OFFSET_MASK 0x7F000000 +#define EC_GEN_L2_OFFSET_SHIFT 24 + +/**** cfg_i register ****/ +/* IPv4 protocol index */ +#define EC_GEN_CFG_I_IPV4_INDEX_MASK 0x0000001F +#define EC_GEN_CFG_I_IPV4_INDEX_SHIFT 0 +/* IPv6 protocol index */ +#define EC_GEN_CFG_I_IPV6_INDEX_MASK 0x000003E0 +#define EC_GEN_CFG_I_IPV6_INDEX_SHIFT 5 +/* TCP protocol index */ +#define EC_GEN_CFG_I_TCP_INDEX_MASK 0x00007C00 +#define EC_GEN_CFG_I_TCP_INDEX_SHIFT 10 +/* UDP protocol index */ +#define EC_GEN_CFG_I_UDP_INDEX_MASK 0x000F8000 +#define EC_GEN_CFG_I_UDP_INDEX_SHIFT 15 +/* MACsec with 8 bytes SecTAG */ +#define EC_GEN_CFG_I_MACSEC_8_INDEX_MASK 0x01F00000 +#define EC_GEN_CFG_I_MACSEC_8_INDEX_SHIFT 20 +/* MACsec with 16 bytes SecTAG */ +#define EC_GEN_CFG_I_MACSEC_16_INDEX_MASK 0x3E000000 +#define EC_GEN_CFG_I_MACSEC_16_INDEX_SHIFT 25 + +/**** cfg_i_ext register ****/ +/* FcoE protocol index */ +#define EC_GEN_CFG_I_EXT_FCOE_INDEX_MASK 0x0000001F +#define EC_GEN_CFG_I_EXT_FCOE_INDEX_SHIFT 0 +/* RoCE protocol index */ +#define EC_GEN_CFG_I_EXT_ROCE_INDEX_L3_1_MASK 0x000003E0 +#define EC_GEN_CFG_I_EXT_ROCE_INDEX_L3_1_SHIFT 5 +/* RoCE protocol index */ +#define EC_GEN_CFG_I_EXT_ROCE_INDEX_L3_2_MASK 0x00007C00 +#define EC_GEN_CFG_I_EXT_ROCE_INDEX_L3_2_SHIFT 10 +/* RoCE protocol index */ +#define EC_GEN_CFG_I_EXT_ROCE_INDEX_L4_MASK 0x000F8000 +#define EC_GEN_CFG_I_EXT_ROCE_INDEX_L4_SHIFT 15 + +/**** en_ext register ****/ +/* Enable Usage of Ethernet port memories for testing */ +#define EC_GEN_EN_EXT_MEM_FOR_TEST_MASK 0x0000000F +#define EC_GEN_EN_EXT_MEM_FOR_TEST_SHIFT 0 +#define EC_GEN_EN_EXT_MEM_FOR_TEST_VAL_EN \ + (0xa << EC_GEN_EN_EXT_MEM_FOR_TEST_SHIFT) +#define EC_GEN_EN_EXT_MEM_FOR_TEST_VAL_DIS \ + (0x0 << EC_GEN_EN_EXT_MEM_FOR_TEST_SHIFT) +/* Enable MAC loop back (Rx --> Tx, after MAC layer) for 802 */ +#define EC_GEN_EN_EXT_MAC_LB (1 << 4) +/* CRC forward value for the MAC Tx when working in loopback mod ... */ +#define EC_GEN_EN_EXT_MAC_LB_CRC_FWD (1 << 5) +/* Ready signal configuration when in loopback mode:00 - Ready f ... */ +#define EC_GEN_EN_EXT_MAC_LB_READY_CFG_MASK 0x000000C0 +#define EC_GEN_EN_EXT_MAC_LB_READY_CFG_SHIFT 6 +/* Bypass the PTH completion update. */ +#define EC_GEN_EN_EXT_PTH_COMPLETION_BYPASS (1 << 16) +/* Selection between the 1G and 10G MAC: +0 - 1G +1 - 10G */ +#define EC_GEN_EN_EXT_PTH_1_10_SEL (1 << 17) +/* avoid timestamping every pkt in 1G */ +#define EC_GEN_EN_EXT_PTH_CFG_1G_TIMESTAMP_OPT (1 << 18) +/* Selection between descriptor caching options (WORD selection) ... 
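Every multi-bit field in this header comes as a MASK/SHIFT pair, so one read-modify-write helper covers them all. A sketch (al_reg_field_set() and the example index values are illustrative, not part of the driver; the cfg_i member name is taken from the register heading above):

static inline uint32_t al_reg_field_set(uint32_t reg, uint32_t mask,
					unsigned int shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* e.g. point the parser's IPv4 and TCP protocol indices at entries 4 and 11 */
static void al_ec_set_proto_indices(struct al_ec_regs __iomem *ec)
{
	uint32_t v = readl(&ec->gen.cfg_i);

	v = al_reg_field_set(v, EC_GEN_CFG_I_IPV4_INDEX_MASK,
			     EC_GEN_CFG_I_IPV4_INDEX_SHIFT, 4);
	v = al_reg_field_set(v, EC_GEN_CFG_I_TCP_INDEX_MASK,
			     EC_GEN_CFG_I_TCP_INDEX_SHIFT, 11);
	writel(v, &ec->gen.cfg_i);
}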
*/ +#define EC_GEN_EN_EXT_CACHE_WORD_SPLIT (1 << 20) + +/**** gen register ****/ +/* Enable swap of input byte order */ +#define EC_MAC_GEN_SWAP_IN_BYTE (1 << 0) + +/**** min_pkt register ****/ +/* Minimum packet size */ +#define EC_MAC_MIN_PKT_SIZE_MASK 0x000FFFFF +#define EC_MAC_MIN_PKT_SIZE_SHIFT 0 + +/**** max_pkt register ****/ +/* Maximum packet size */ +#define EC_MAC_MAX_PKT_SIZE_MASK 0x000FFFFF +#define EC_MAC_MAX_PKT_SIZE_SHIFT 0 + +/**** cfg_1 register ****/ +/* Drop packet at the ingress0 - Packets are not dropped at the ... */ +#define EC_RXF_CFG_1_DROP_AT_INGRESS (1 << 0) +/* Accept packet criteria at start of packet indication */ +#define EC_RXF_CFG_1_SOP_ACCEPT (1 << 1) +/* Select the arbiter between Rx packets and Tx packets (packets ... */ +#define EC_RXF_CFG_1_ARB_SEL (1 << 2) +/* Arbiter priority when strict priority is selected in arb_sel0 ... */ +#define EC_RXF_CFG_1_ARB_P (1 << 3) +/* Force loopback operation */ +#define EC_RXF_CFG_1_FORCE_LB (1 << 4) +/* Forwarding selection between Rx path and/or packet analyzer */ +#define EC_RXF_CFG_1_FWD_SEL_MASK 0x00000300 +#define EC_RXF_CFG_1_FWD_SEL_SHIFT 8 + +/**** cfg_2 register ****/ +/* FIFO USED threshold for accepting new packets, low threshold ... */ +#define EC_RXF_CFG_2_FIFO_USED_TH_L_MASK 0x0000FFFF +#define EC_RXF_CFG_2_FIFO_USED_TH_L_SHIFT 0 +/* FIFO USED threshold for accepting new packets, high threshold ... */ +#define EC_RXF_CFG_2_FIFO_USED_TH_H_MASK 0xFFFF0000 +#define EC_RXF_CFG_2_FIFO_USED_TH_H_SHIFT 16 + +/**** rd_fifo register ****/ +/* Minimum number of entries in the data FIFO to start reading p ... */ +#define EC_RXF_RD_FIFO_TH_DATA_MASK 0x0000FFFF +#define EC_RXF_RD_FIFO_TH_DATA_SHIFT 0 +/* Enable cut through operation */ +#define EC_RXF_RD_FIFO_EN_CUT_TH (1 << 16) + +/**** wr_fifo register ****/ + +#define EC_RXF_WR_FIFO_TH_DATA_MASK 0x0000FFFF +#define EC_RXF_WR_FIFO_TH_DATA_SHIFT 0 + +#define EC_RXF_WR_FIFO_TH_INFO_MASK 0xFFFF0000 +#define EC_RXF_WR_FIFO_TH_INFO_SHIFT 16 + +/**** lb_fifo register ****/ + +#define EC_RXF_LB_FIFO_TH_DATA_MASK 0x0000FFFF +#define EC_RXF_LB_FIFO_TH_DATA_SHIFT 0 + +#define EC_RXF_LB_FIFO_TH_INFO_MASK 0xFFFF0000 +#define EC_RXF_LB_FIFO_TH_INFO_SHIFT 16 + +/**** cfg_lb register ****/ +/* FIFO USED threshold for accepting new packets */ +#define EC_RXF_CFG_LB_FIFO_USED_TH_INT_MASK 0x0000FFFF +#define EC_RXF_CFG_LB_FIFO_USED_TH_INT_SHIFT 0 +/* FIFO USED threshold for generating ready for the Tx path */ +#define EC_RXF_CFG_LB_FIFO_USED_TH_EXT_MASK 0xFFFF0000 +#define EC_RXF_CFG_LB_FIFO_USED_TH_EXT_SHIFT 16 + +/**** out_drop register ****/ + +#define EC_RXF_OUT_DROP_MAC_ERR (1 << 0) + +#define EC_RXF_OUT_DROP_MAC_COL (1 << 1) + +#define EC_RXF_OUT_DROP_MAC_DEC (1 << 2) + +#define EC_RXF_OUT_DROP_MAC_LEN (1 << 3) + +#define EC_RXF_OUT_DROP_MAC_PHY (1 << 4) + +#define EC_RXF_OUT_DROP_MAC_FIFO (1 << 5) + +#define EC_RXF_OUT_DROP_MAC_FCS (1 << 6) + +#define EC_RXF_OUT_DROP_MAC_ETYPE (1 << 7) + +#define EC_RXF_OUT_DROP_EC_LEN (1 << 8) + +#define EC_RXF_OUT_DROP_EC_FIFO (1 << 9) + +/**** parse_cfg register ****/ +/* MAX number of beats for packet parsing */ +#define EC_EPE_PARSE_CFG_MAX_BEATS_MASK 0x000000FF +#define EC_EPE_PARSE_CFG_MAX_BEATS_SHIFT 0 +/* MAX number of parsing iterations for packet parsing */ +#define EC_EPE_PARSE_CFG_MAX_ITER_MASK 0x0000FF00 +#define EC_EPE_PARSE_CFG_MAX_ITER_SHIFT 8 + +/**** act_table_addr register ****/ +/* Address for accessing the table */ +#define EC_EPE_ACT_TABLE_ADDR_VAL_MASK 0x0000001F +#define EC_EPE_ACT_TABLE_ADDR_VAL_SHIFT 0 + +/**** 
act_table_data_1 register ****/ +/* Table data[5:0] - Offset to next protocol [bytes][6] - Next p ... */ +#define EC_EPE_ACT_TABLE_DATA_1_VAL_MASK 0x03FFFFFF +#define EC_EPE_ACT_TABLE_DATA_1_VAL_SHIFT 0 + +/**** act_table_data_2 register ****/ +/* Table Data [8:0] - Offset to data in the packet [bits][17:9] ... */ +#define EC_EPE_ACT_TABLE_DATA_2_VAL_MASK 0x1FFFFFFF +#define EC_EPE_ACT_TABLE_DATA_2_VAL_SHIFT 0 + +/**** act_table_data_3 register ****/ +/* Table Data [8:0] - Offset to data in the packet [bits] [17:9 ... */ +#define EC_EPE_ACT_TABLE_DATA_3_VAL_MASK 0x1FFFFFFF +#define EC_EPE_ACT_TABLE_DATA_3_VAL_SHIFT 0 + +/**** act_table_data_4 register ****/ +/* Table data[7:0] - Offset to header length location in the pac ... */ +#define EC_EPE_ACT_TABLE_DATA_4_VAL_MASK 0x0FFFFFFF +#define EC_EPE_ACT_TABLE_DATA_4_VAL_SHIFT 0 + +/**** act_table_data_6 register ****/ +/* Table data[0] - WR header length[10:1] - Write header length ... */ +#define EC_EPE_ACT_TABLE_DATA_6_VAL_MASK 0x007FFFFF +#define EC_EPE_ACT_TABLE_DATA_6_VAL_SHIFT 0 + +/**** res_in register ****/ +/* Selector for input parse_en0 - Input vector1 - Default value ... */ +#define EC_EPE_RES_IN_SEL_PARSE_EN (1 << 0) +/* Selector for input protocol_index 0 - Input vector 1 - Defaul ... */ +#define EC_EPE_RES_IN_SEL_PROT_INDEX (1 << 1) +/* Selector for input hdr_offset 0 - Input vector 1 - Default va ... */ +#define EC_EPE_RES_IN_SEL_HDR_OFFSET (1 << 2) + +/**** p1 register ****/ +/* Location of the input protocol index in the parser result vec ... */ +#define EC_EPE_RES_P1_IN_PROT_INDEX_MASK 0x000003FF +#define EC_EPE_RES_P1_IN_PROT_INDEX_SHIFT 0 + +/**** p2 register ****/ +/* Location of the input offset in the parser result vector */ +#define EC_EPE_RES_P2_IN_OFFSET_MASK 0x000003FF +#define EC_EPE_RES_P2_IN_OFFSET_SHIFT 0 + +/**** p3 register ****/ +/* Location of the input parse enable in the parser result vecto ... */ +#define EC_EPE_RES_P3_IN_PARSE_EN_MASK 0x000003FF +#define EC_EPE_RES_P3_IN_PARSE_EN_SHIFT 0 + +/**** p4 register ****/ +/* Location of the control bits in the parser result vector */ +#define EC_EPE_RES_P4_CTRL_BITS_MASK 0x000003FF +#define EC_EPE_RES_P4_CTRL_BITS_SHIFT 0 + +/**** p5 register ****/ +/* Location of the MAC DA in the parser result vector */ +#define EC_EPE_RES_P5_DA_MASK 0x000003FF +#define EC_EPE_RES_P5_DA_SHIFT 0 + +/**** p6 register ****/ +/* Location of the MAC SA in the parser result vector */ +#define EC_EPE_RES_P6_SA_MASK 0x000003FF +#define EC_EPE_RES_P6_SA_SHIFT 0 + +/**** p7 register ****/ +/* Location of the first VLAN in the parser result vector */ +#define EC_EPE_RES_P7_VLAN_1_MASK 0x000003FF +#define EC_EPE_RES_P7_VLAN_1_SHIFT 0 + +/**** p8 register ****/ +/* Location of the second VLAN in the parser result vector */ +#define EC_EPE_RES_P8_VLAN_2_MASK 0x000003FF +#define EC_EPE_RES_P8_VLAN_2_SHIFT 0 + +/**** p9 register ****/ +/* Location of the L3 protocol index in the parser result vector ... 
*/ +#define EC_EPE_RES_P9_L3_PROT_INDEX_MASK 0x000003FF +#define EC_EPE_RES_P9_L3_PROT_INDEX_SHIFT 0 + +/**** p10 register ****/ +/* Location of the L3 offset in the parser result vector */ +#define EC_EPE_RES_P10_L3_OFFSET_MASK 0x000003FF +#define EC_EPE_RES_P10_L3_OFFSET_SHIFT 0 + +/**** p11 register ****/ +/* Location of the L3 SIP in the parser result vector */ +#define EC_EPE_RES_P11_L3_SIP_MASK 0x000003FF +#define EC_EPE_RES_P11_L3_SIP_SHIFT 0 + +/**** p12 register ****/ +/* Location of the L3 DIP in the parser result vector */ +#define EC_EPE_RES_P12_L3_DIP_MASK 0x000003FF +#define EC_EPE_RES_P12_L3_DIP_SHIFT 0 + +/**** p13 register ****/ +/* Location of the L3 priority in the parser result vector */ +#define EC_EPE_RES_P13_L3_PRIORITY_MASK 0x000003FF +#define EC_EPE_RES_P13_L3_PRIORITY_SHIFT 0 + +/**** p14 register ****/ +/* Location of the L3 header length in the parser result vector */ +#define EC_EPE_RES_P14_L3_HDR_LEN_MASK 0x000003FF +#define EC_EPE_RES_P14_L3_HDR_LEN_SHIFT 0 + +/**** p15 register ****/ +/* Location of the L4 protocol index in the parser result vector ... */ +#define EC_EPE_RES_P15_L4_PROT_INDEX_MASK 0x000003FF +#define EC_EPE_RES_P15_L4_PROT_INDEX_SHIFT 0 + +/**** p16 register ****/ +/* Location of the L4 source port in the parser result vector */ +#define EC_EPE_RES_P16_L4_SRC_PORT_MASK 0x000003FF +#define EC_EPE_RES_P16_L4_SRC_PORT_SHIFT 0 + +/**** p17 register ****/ +/* Location of the L4 destination port in the parser result vect ... */ +#define EC_EPE_RES_P17_L4_DST_PORT_MASK 0x000003FF +#define EC_EPE_RES_P17_L4_DST_PORT_SHIFT 0 + +/**** p18 register ****/ +/* Location of the L4 offset in the parser result vector */ +#define EC_EPE_RES_P18_L4_OFFSET_MASK 0x000003FF +#define EC_EPE_RES_P18_L4_OFFSET_SHIFT 0 + +/**** p19 register ****/ +/* Location of the Ether type in the parser result vector when w ... */ +#define EC_EPE_RES_P19_WOL_ETYPE_MASK 0x000003FF +#define EC_EPE_RES_P19_WOL_ETYPE_SHIFT 0 + +/**** p20 register ****/ +/* Location of the RoCE QP number field in the parser result vec ... */ +#define EC_EPE_RES_P20_ROCE_QPN_MASK 0x000003FF +#define EC_EPE_RES_P20_ROCE_QPN_SHIFT 0 + +/**** hdr_len register ****/ +/* Value for selecting table 1 */ +#define EC_EPE_H_HDR_LEN_TABLE_1_MASK 0x000000FF +#define EC_EPE_H_HDR_LEN_TABLE_1_SHIFT 0 +/* Value for selecting table 2 */ +#define EC_EPE_H_HDR_LEN_TABLE_2_MASK 0x00FF0000 +#define EC_EPE_H_HDR_LEN_TABLE_2_SHIFT 16 + +/**** comp_data register ****/ +/* Data 1 for comparison */ +#define EC_EPE_P_COMP_DATA_DATA_1_MASK 0x0000FFFF +#define EC_EPE_P_COMP_DATA_DATA_1_SHIFT 0 +/* Data 2 for comparison +[18:16] - Stage +[24:19] - Branch ID */ +#define EC_EPE_P_COMP_DATA_DATA_2_MASK 0x01FF0000 +#define EC_EPE_P_COMP_DATA_DATA_2_SHIFT 16 + +/**** comp_mask register ****/ +/* Data 1 for comparison */ +#define EC_EPE_P_COMP_MASK_DATA_1_MASK 0x0000FFFF +#define EC_EPE_P_COMP_MASK_DATA_1_SHIFT 0 +/* Data 2 for comparison +[18:16] - Stage +[24:19] - Branch ID */ +#define EC_EPE_P_COMP_MASK_DATA_2_MASK 0x01FF0000 +#define EC_EPE_P_COMP_MASK_DATA_2_SHIFT 16 + +/**** comp_ctrl register ****/ +/* Output result value */ +#define EC_EPE_P_COMP_CTRL_RES_MASK 0x0000001F +#define EC_EPE_P_COMP_CTRL_RES_SHIFT 0 +/* Compare command for the data_1 field00 - Compare01 - <=10 - > ... */ +#define EC_EPE_P_COMP_CTRL_CMD_1_MASK 0x00030000 +#define EC_EPE_P_COMP_CTRL_CMD_1_SHIFT 16 +/* Compare command for the data_2 field 00 - Compare 01 - <= 10 ... 
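Per the comments above, comp_data/comp_mask carry the 16-bit compare value in [15:0] plus the stage in [18:16] and branch ID in [24:19], while comp_ctrl supplies the 5-bit result and a valid flag (bit 31, defined just below). A hedged sketch of loading one comparator entry; al_ec_epe_p is assumed to expose the same p_comp_* register trio shown for al_ec_msp_c earlier:

static void al_epe_comp_entry_set(struct al_ec_epe_p __iomem *p,
				  uint32_t data, uint32_t mask, uint32_t res)
{
	writel(data, &p->p_comp_data);	/* value | stage << 16 | branch << 19 */
	writel(mask, &p->p_comp_mask);
	writel(((res << EC_EPE_P_COMP_CTRL_RES_SHIFT) &
		EC_EPE_P_COMP_CTRL_RES_MASK) |
	       EC_EPE_P_COMP_CTRL_VALID, &p->p_comp_ctrl);
}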
*/ +#define EC_EPE_P_COMP_CTRL_CMD_2_MASK 0x000C0000 +#define EC_EPE_P_COMP_CTRL_CMD_2_SHIFT 18 +/* Entry is valid */ +#define EC_EPE_P_COMP_CTRL_VALID (1 << 31) + +/**** prot_act register ****/ +/* Drop indication for the selected protocol index */ +#define EC_EPE_A_PROT_ACT_DROP (1 << 0) +/* Mapping value Used when mapping the entire protocol index ran ... */ +#define EC_EPE_A_PROT_ACT_MAP_MASK 0x00000F00 +#define EC_EPE_A_PROT_ACT_MAP_SHIFT 8 + +/**** thash_cfg_1 register ****/ +/* Hash function output selection:000 - [7:0]001 - [15:8]010 - [ ... */ +#define EC_RFW_THASH_CFG_1_OUT_SEL_MASK 0x00000007 +#define EC_RFW_THASH_CFG_1_OUT_SEL_SHIFT 0 +/* Selects between hash functions00 - toeplitz01 - CRC-3210 - 0x ... */ +#define EC_RFW_THASH_CFG_1_FUNC_SEL_MASK 0x00000300 +#define EC_RFW_THASH_CFG_1_FUNC_SEL_SHIFT 8 +/* Enable SIP/DIP swap if SIP= 5. */ +#define EC_RFW_LRO_CHECK_IPV4_IHL_1 (1 << 2) +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_IPV4_IHL_2 (1 << 3) +/* Compare DSCP to previous packet. */ +#define EC_RFW_LRO_CHECK_IPV4_DSCP (1 << 4) +/* Check that Total length >= lro_ipv4_tlen_val. */ +#define EC_RFW_LRO_CHECK_IPV4_TLEN (1 << 5) +/* Compare to previous packet value +1. */ +#define EC_RFW_LRO_CHECK_IPV4_ID (1 << 6) +/* Compare to lro_ipv4_flags_val with lro_ipv4_flags_mask_0. */ +#define EC_RFW_LRO_CHECK_IPV4_FLAGS_0 (1 << 7) +/* Compare to previous packet flags with lro_ipv4_flags_mask_1. */ +#define EC_RFW_LRO_CHECK_IPV4_FLAGS_1 (1 << 8) +/* Verify that the fragment offset field is 0. */ +#define EC_RFW_LRO_CHECK_IPV4_FRAG (1 << 9) +/* Verify that the TTL value >0. */ +#define EC_RFW_LRO_CHECK_IPV4_TTL_0 (1 << 10) +/* Compare TTL value to previous packet. */ +#define EC_RFW_LRO_CHECK_IPV4_TTL_1 (1 << 11) +/* Compare to previous packet protocol field. */ +#define EC_RFW_LRO_CHECK_IPV4_PROT_0 (1 << 12) +/* Verify that the protocol is TCP or UDP. */ +#define EC_RFW_LRO_CHECK_IPV4_PROT_1 (1 << 13) +/* Verify that the check sum is correct. */ +#define EC_RFW_LRO_CHECK_IPV4_CHECKSUM (1 << 14) +/* Compare SIP to previous packet. */ +#define EC_RFW_LRO_CHECK_IPV4_SIP (1 << 15) +/* Compare DIP to previous packet. */ +#define EC_RFW_LRO_CHECK_IPV4_DIP (1 << 16) + +/**** lro_ipv4 register ****/ +/* Total length minimum value */ +#define EC_RFW_LRO_IPV4_TLEN_VAL_MASK 0x0000FFFF +#define EC_RFW_LRO_IPV4_TLEN_VAL_SHIFT 0 +/* Flags value */ +#define EC_RFW_LRO_IPV4_FLAGS_VAL_MASK 0x00070000 +#define EC_RFW_LRO_IPV4_FLAGS_VAL_SHIFT 16 +/* Flags mask */ +#define EC_RFW_LRO_IPV4_FLAGS_MASK_0_MASK 0x00380000 +#define EC_RFW_LRO_IPV4_FLAGS_MASK_0_SHIFT 19 +/* Flags mask */ +#define EC_RFW_LRO_IPV4_FLAGS_MASK_1_MASK 0x01C00000 +#define EC_RFW_LRO_IPV4_FLAGS_MASK_1_SHIFT 22 +/* Version value */ +#define EC_RFW_LRO_IPV4_VER_MASK 0xF0000000 +#define EC_RFW_LRO_IPV4_VER_SHIFT 28 + +/**** lro_check_ipv6 register ****/ +/* Check version field */ +#define EC_RFW_LRO_CHECK_IPV6_VER (1 << 0) +/* Compare TC to previous packet. */ +#define EC_RFW_LRO_CHECK_IPV6_TC (1 << 1) +/* Compare flow label field to previous packet. */ +#define EC_RFW_LRO_CHECK_IPV6_FLOW (1 << 2) +/* Check that Total length >= lro_ipv6_pen_val. */ +#define EC_RFW_LRO_CHECK_IPV6_PLEN (1 << 3) +/* Compare to previous packet next header field. */ +#define EC_RFW_LRO_CHECK_IPV6_NEXT_0 (1 << 4) +/* Verify that the next header is TCP or UDP. */ +#define EC_RFW_LRO_CHECK_IPV6_NEXT_1 (1 << 5) +/* Verify that hop limit is >0. */ +#define EC_RFW_LRO_CHECK_IPV6_HOP_0 (1 << 6) +/* Compare hop limit to previous packet. 
*/ +#define EC_RFW_LRO_CHECK_IPV6_HOP_1 (1 << 7) +/* Compare SIP to previous packet. */ +#define EC_RFW_LRO_CHECK_IPV6_SIP (1 << 8) +/* Compare DIP to previous packet. */ +#define EC_RFW_LRO_CHECK_IPV6_DIP (1 << 9) + +/**** lro_ipv6 register ****/ +/* Payload length minimum value */ +#define EC_RFW_LRO_IPV6_PLEN_VAL_MASK 0x0000FFFF +#define EC_RFW_LRO_IPV6_PLEN_VAL_SHIFT 0 +/* Version value */ +#define EC_RFW_LRO_IPV6_VER_MASK 0x0F000000 +#define EC_RFW_LRO_IPV6_VER_SHIFT 24 + +/**** lro_check_tcp register ****/ +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_TCP_SRC_PORT (1 << 0) +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_TCP_DST_PORT (1 << 1) +/* If (SYN == 1), don't check */ +#define EC_RFW_LRO_CHECK_TCP_SN (1 << 2) +/* Check data offset field == 5. */ +#define EC_RFW_LRO_CHECK_TCP_OFFSET_0 (1 << 3) +/* Check data offset field >= 5. */ +#define EC_RFW_LRO_CHECK_TCP_OFFSET_1 (1 << 4) +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_TCP_OFFSET_2 (1 << 5) +/* Compare reserved field to lro_tcp_res. */ +#define EC_RFW_LRO_CHECK_TCP_RES (1 << 6) +/* Compare to lro_tcp_ecn_val and lro_tcp_ecn_mask_0. */ +#define EC_RFW_LRO_CHECK_TCP_ECN_0 (1 << 7) +/* Compare to previous packet ECN field with lro_tcp_ecn_mask_1 */ +#define EC_RFW_LRO_CHECK_TCP_ECN_1 (1 << 8) +/* Compare to lro_tcp_ctrl_val and lro_tcp_ctrl_mask_0. */ +#define EC_RFW_LRO_CHECK_TCP_CTRL_0 (1 << 9) +/* Compare to previous packet ECN field with lro_tcp_ctrl_mask_1 */ +#define EC_RFW_LRO_CHECK_TCP_CTRL_1 (1 << 10) +/* Verify that check sum is correct. */ +#define EC_RFW_LRO_CHECK_TCP_CHECKSUM (1 << 11) + +/**** lro_tcp register ****/ +/* Reserved field default value */ +#define EC_RFW_LRO_TCP_RES_MASK 0x00000007 +#define EC_RFW_LRO_TCP_RES_SHIFT 0 +/* ECN field value */ +#define EC_RFW_LRO_TCP_ECN_VAL_MASK 0x00000038 +#define EC_RFW_LRO_TCP_ECN_VAL_SHIFT 3 +/* ECN field mask */ +#define EC_RFW_LRO_TCP_ECN_MASK_0_MASK 0x000001C0 +#define EC_RFW_LRO_TCP_ECN_MASK_0_SHIFT 6 +/* ECN field mask */ +#define EC_RFW_LRO_TCP_ECN_MASK_1_MASK 0x00000E00 +#define EC_RFW_LRO_TCP_ECN_MASK_1_SHIFT 9 +/* Control field value */ +#define EC_RFW_LRO_TCP_CTRL_VAL_MASK 0x0003F000 +#define EC_RFW_LRO_TCP_CTRL_VAL_SHIFT 12 +/* Control field mask */ +#define EC_RFW_LRO_TCP_CTRL_MASK_0_MASK 0x00FC0000 +#define EC_RFW_LRO_TCP_CTRL_MASK_0_SHIFT 18 +/* Control field mask */ +#define EC_RFW_LRO_TCP_CTRL_MASK_1_MASK 0x3F000000 +#define EC_RFW_LRO_TCP_CTRL_MASK_1_SHIFT 24 + +/**** lro_check_udp register ****/ +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_UDP_SRC_PORT (1 << 0) +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_UDP_DST_PORT (1 << 1) +/* Verify that check sum is correct. */ +#define EC_RFW_LRO_CHECK_UDP_CHECKSUM (1 << 2) + +/**** lro_check_l2 register ****/ +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_L2_MAC_DA (1 << 0) +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_L2_MAC_SA (1 << 1) +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_L2_VLAN_1_EXIST (1 << 2) +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_L2_VLAN_1_VID (1 << 3) +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_L2_VLAN_1_CFI (1 << 4) +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_L2_VLAN_1_PBITS (1 << 5) +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_L2_VLAN_2_EXIST (1 << 6) +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_L2_VLAN_2_VID (1 << 7) +/* Compare to previous packet. 
*/ +#define EC_RFW_LRO_CHECK_L2_VLAN_2_CFI (1 << 8) +/* Compare to previous packet. */ +#define EC_RFW_LRO_CHECK_L2_VLAN_2_PBITS (1 << 9) +/* Verify that the FCS is correct. */ +#define EC_RFW_LRO_CHECK_L2_FCS (1 << 10) + +/**** lro_check_gen register ****/ +/* Compare to previous packet */ +#define EC_RFW_LRO_CHECK_GEN_UDMA (1 << 0) +/* Compare to previous packet */ +#define EC_RFW_LRO_CHECK_GEN_QUEUE (1 << 1) + +/**** lro_store register ****/ +/* Store packet information if protocol match. */ +#define EC_RFW_LRO_STORE_IPV4 (1 << 0) +/* Store packet information if protocol match. */ +#define EC_RFW_LRO_STORE_IPV6 (1 << 1) +/* Store packet information if protocol match. */ +#define EC_RFW_LRO_STORE_TCP (1 << 2) +/* Store packet information if protocol match. */ +#define EC_RFW_LRO_STORE_UDP (1 << 3) +/* Store packet if IPv4 flags match the register value with mask */ +#define EC_RFW_LRO_STORE_IPV4_FLAGS_VAL_MASK 0x00000070 +#define EC_RFW_LRO_STORE_IPV4_FLAGS_VAL_SHIFT 4 +/* Mask for IPv4 flags */ +#define EC_RFW_LRO_STORE_IPV4_FLAGS_MASK_MASK 0x00000380 +#define EC_RFW_LRO_STORE_IPV4_FLAGS_MASK_SHIFT 7 +/* Store packet if TCP control and ECN match the register value ... */ +#define EC_RFW_LRO_STORE_TCP_CTRL_VAL_MASK 0x0007FC00 +#define EC_RFW_LRO_STORE_TCP_CTRL_VAL_SHIFT 10 +/* Mask for TCP control */ +#define EC_RFW_LRO_STORE_TCP_CTRL_MASK_MASK 0x0FF80000 +#define EC_RFW_LRO_STORE_TCP_CTRL_MASK_SHIFT 19 + +/**** vid_table_def register ****/ +/* Table default data (valid only after configuring the table ad ... */ +#define EC_RFW_VID_TABLE_DEF_VAL_MASK 0x0000003F +#define EC_RFW_VID_TABLE_DEF_VAL_SHIFT 0 +/* Default data selection +0 - Default value +1 - Table data out */ +#define EC_RFW_VID_TABLE_DEF_SEL (1 << 6) + +/**** ctrl_table_def register ****/ +/* Control table output for selecting the forwarding MUXs [3:0] ... */ +#define EC_RFW_CTRL_TABLE_DEF_VAL_MASK 0x000FFFFF +#define EC_RFW_CTRL_TABLE_DEF_VAL_SHIFT 0 +/* Default data selection 0 - Default value 1 - Table data out ... */ +#define EC_RFW_CTRL_TABLE_DEF_SEL (1 << 20) + +/**** cfg_a_0 register ****/ +/* Selection of the L3 checksum result in the Metadata00 - L3 ch ... */ +#define EC_RFW_CFG_A_0_META_L3_CHK_RES_SEL_MASK 0x00000003 +#define EC_RFW_CFG_A_0_META_L3_CHK_RES_SEL_SHIFT 0 +/* Selection of the L4 checksum result in the Metadata0 - L4 che ... */ +#define EC_RFW_CFG_A_0_META_L4_CHK_RES_SEL (1 << 2) +/* Selection of the LRO_context_value result in the Metadata0 - ... */ +#define EC_RFW_CFG_A_0_LRO_CONTEXT_SEL (1 << 4) + +/**** thash_cfg_3 register ****/ +/* Enable Hash value for RoCE packets in outer packet. */ +#define EC_RFW_THASH_CFG_3_ENABLE_OUTER_ROCE (1 << 0) +/* Enable Hash value for RoCE packets in inner packet. */ +#define EC_RFW_THASH_CFG_3_ENABLE_INNER_ROCE (1 << 1) +/* Enable Hash value for FcoE packets in outer packet. */ +#define EC_RFW_THASH_CFG_3_ENABLE_OUTER_FCOE (1 << 2) +/* Enable Hash value for FcoE packets in inner packet. 
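Each EC_RFW_LRO_CHECK_* bit arms one test a packet must pass before it may be aggregated with its predecessor. A conservative IPv4 setting, as a sketch (the lro_check_ipv4 register/member name is inferred from the macro prefix, not shown explicitly in this header):

/* Only aggregate unfragmented, checksum-valid packets of a single flow */
static void al_ec_lro_ipv4_checks(struct al_ec_regs __iomem *ec)
{
	writel(EC_RFW_LRO_CHECK_IPV4_FRAG |	/* fragment offset == 0 */
	       EC_RFW_LRO_CHECK_IPV4_ID |	/* IP ID == previous + 1 */
	       EC_RFW_LRO_CHECK_IPV4_PROT_1 |	/* protocol is TCP or UDP */
	       EC_RFW_LRO_CHECK_IPV4_CHECKSUM |	/* header checksum correct */
	       EC_RFW_LRO_CHECK_IPV4_SIP |	/* same source IP as previous */
	       EC_RFW_LRO_CHECK_IPV4_DIP,	/* same destination IP */
	       &ec->rfw.lro_check_ipv4);	/* member name assumed */
}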
*/ +#define EC_RFW_THASH_CFG_3_ENABLE_INNER_FCOE (1 << 3) + +/**** thash_mask_outer_ipv6 register ****/ +/* IPv6 source IP address */ +#define EC_RFW_THASH_MASK_OUTER_IPV6_SRC_MASK 0x0000FFFF +#define EC_RFW_THASH_MASK_OUTER_IPV6_SRC_SHIFT 0 +/* IPv6 destination IP address */ +#define EC_RFW_THASH_MASK_OUTER_IPV6_DST_MASK 0xFFFF0000 +#define EC_RFW_THASH_MASK_OUTER_IPV6_DST_SHIFT 16 + +/**** thash_mask_outer register ****/ +/* IPv4 source IP address */ +#define EC_RFW_THASH_MASK_OUTER_IPV4_SRC_MASK 0x0000000F +#define EC_RFW_THASH_MASK_OUTER_IPV4_SRC_SHIFT 0 +/* IPv4 destination IP address */ +#define EC_RFW_THASH_MASK_OUTER_IPV4_DST_MASK 0x000000F0 +#define EC_RFW_THASH_MASK_OUTER_IPV4_DST_SHIFT 4 +/* TCP source port */ +#define EC_RFW_THASH_MASK_OUTER_TCP_SRC_PORT_MASK 0x00000300 +#define EC_RFW_THASH_MASK_OUTER_TCP_SRC_PORT_SHIFT 8 +/* TCP destination port */ +#define EC_RFW_THASH_MASK_OUTER_TCP_DST_PORT_MASK 0x00000C00 +#define EC_RFW_THASH_MASK_OUTER_TCP_DST_PORT_SHIFT 10 +/* UDP source port */ +#define EC_RFW_THASH_MASK_OUTER_UDP_SRC_PORT_MASK 0x00003000 +#define EC_RFW_THASH_MASK_OUTER_UDP_SRC_PORT_SHIFT 12 +/* UDP destination port */ +#define EC_RFW_THASH_MASK_OUTER_UDP_DST_PORT_MASK 0x0000C000 +#define EC_RFW_THASH_MASK_OUTER_UDP_DST_PORT_SHIFT 14 + +/**** thash_mask_inner_ipv6 register ****/ +/* IPv6 source IP address */ +#define EC_RFW_THASH_MASK_INNER_IPV6_SRC_MASK 0x0000FFFF +#define EC_RFW_THASH_MASK_INNER_IPV6_SRC_SHIFT 0 +/* IPv6 destination IP address */ +#define EC_RFW_THASH_MASK_INNER_IPV6_DST_MASK 0xFFFF0000 +#define EC_RFW_THASH_MASK_INNER_IPV6_DST_SHIFT 16 + +/**** thash_mask_inner register ****/ +/* IPv4 source IP address */ +#define EC_RFW_THASH_MASK_INNER_IPV4_SRC_MASK 0x0000000F +#define EC_RFW_THASH_MASK_INNER_IPV4_SRC_SHIFT 0 +/* IPv4 destination IP address */ +#define EC_RFW_THASH_MASK_INNER_IPV4_DST_MASK 0x000000F0 +#define EC_RFW_THASH_MASK_INNER_IPV4_DST_SHIFT 4 +/* TCP source port */ +#define EC_RFW_THASH_MASK_INNER_TCP_SRC_PORT_MASK 0x00000300 +#define EC_RFW_THASH_MASK_INNER_TCP_SRC_PORT_SHIFT 8 +/* TCP destination port */ +#define EC_RFW_THASH_MASK_INNER_TCP_DST_PORT_MASK 0x00000C00 +#define EC_RFW_THASH_MASK_INNER_TCP_DST_PORT_SHIFT 10 +/* UDP source port */ +#define EC_RFW_THASH_MASK_INNER_UDP_SRC_PORT_MASK 0x00003000 +#define EC_RFW_THASH_MASK_INNER_UDP_SRC_PORT_SHIFT 12 +/* UDP destination port */ +#define EC_RFW_THASH_MASK_INNER_UDP_DST_PORT_MASK 0x0000C000 +#define EC_RFW_THASH_MASK_INNER_UDP_DST_PORT_SHIFT 14 + +/**** def_cfg register ****/ +/* Number of padding bytes to add at the beginning of each Ether ... 
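The thash mask fields look like per-byte enables: 4 bits per IPv4 address (16 per IPv6 address) and 2 bits per L4 port. Enabling classic 4-tuple RSS on the outer header is then just OR-ing the full field masks together, e.g. (thash_mask_outer member name assumed from the register heading):

static void al_ec_rss_outer_4tuple(struct al_ec_regs __iomem *ec)
{
	writel(EC_RFW_THASH_MASK_OUTER_IPV4_SRC_MASK |
	       EC_RFW_THASH_MASK_OUTER_IPV4_DST_MASK |
	       EC_RFW_THASH_MASK_OUTER_TCP_SRC_PORT_MASK |
	       EC_RFW_THASH_MASK_OUTER_TCP_DST_PORT_MASK,
	       &ec->rfw.thash_mask_outer);	/* hash over SIP/DIP/ports */
}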
*/
+#define EC_RFW_UDMA_DEF_CFG_RX_PAD_MASK 0x0000003F
+#define EC_RFW_UDMA_DEF_CFG_RX_PAD_SHIFT 0
+
+/**** queue register ****/
+/* Mapping between priority and queue number */
+#define EC_RFW_PRIORITY_QUEUE_MAP_MASK 0x00000003
+#define EC_RFW_PRIORITY_QUEUE_MAP_SHIFT 0
+
+/**** opt_1 register ****/
+/* Default UDMA for forwarding */
+#define EC_RFW_DEFAULT_OPT_1_UDMA_MASK 0x0000000F
+#define EC_RFW_DEFAULT_OPT_1_UDMA_SHIFT 0
+/* Default priority for forwarding */
+#define EC_RFW_DEFAULT_OPT_1_PRIORITY_MASK 0x00000700
+#define EC_RFW_DEFAULT_OPT_1_PRIORITY_SHIFT 8
+/* Default queue for forwarding */
+#define EC_RFW_DEFAULT_OPT_1_QUEUE_MASK 0x00030000
+#define EC_RFW_DEFAULT_OPT_1_QUEUE_SHIFT 16
+
+/**** data_h register ****/
+/* MAC address data */
+#define EC_FWD_MAC_DATA_H_VAL_MASK 0x0000FFFF
+#define EC_FWD_MAC_DATA_H_VAL_SHIFT 0
+
+/**** mask_h register ****/
+/* MAC address mask */
+#define EC_FWD_MAC_MASK_H_VAL_MASK 0x0000FFFF
+#define EC_FWD_MAC_MASK_H_VAL_SHIFT 0
+
+/**** ctrl register ****/
+/* Control value for Rx forwarding engine[0] - Drop indication[2 ... */
+#define EC_FWD_MAC_CTRL_RX_VAL_MASK 0x000001FF
+#define EC_FWD_MAC_CTRL_RX_VAL_SHIFT 0
+
+/* Drop indication */
+#define EC_FWD_MAC_CTRL_RX_VAL_DROP (1 << 0)
+
+/* Control table command input */
+#define EC_FWD_MAC_CTRL_RX_VAL_CTRL_CMD_MASK 0x00000006
+#define EC_FWD_MAC_CTRL_RX_VAL_CTRL_CMD_SHIFT 1
+
+/* UDMA selection */
+#define EC_FWD_MAC_CTRL_RX_VAL_UDMA_MASK 0x00000078
+#define EC_FWD_MAC_CTRL_RX_VAL_UDMA_SHIFT 3
+
+/* Queue number */
+#define EC_FWD_MAC_CTRL_RX_VAL_QID_MASK 0x00000180
+#define EC_FWD_MAC_CTRL_RX_VAL_QID_SHIFT 7
+
+/* Entry is valid for Rx forwarding engine. */
+#define EC_FWD_MAC_CTRL_RX_VALID (1 << 15)
+/* Control value for Tx forwarding engine */
+#define EC_FWD_MAC_CTRL_TX_VAL_MASK 0x001F0000
+#define EC_FWD_MAC_CTRL_TX_VAL_SHIFT 16
+/* Entry is valid for Tx forwarding engine. */
+#define EC_FWD_MAC_CTRL_TX_VALID (1 << 31)
+
+/**** uc register ****/
+/* Timer max value for waiting for a stream to be ready to accep ... */
+#define EC_MSW_UC_TIMER_MASK 0x0000FFFF
+#define EC_MSW_UC_TIMER_SHIFT 0
+/* Drop packet if target queue in the UDMA is full */
+#define EC_MSW_UC_Q_FULL_DROP_MASK 0x000F0000
+#define EC_MSW_UC_Q_FULL_DROP_SHIFT 16
+/* Drop packet if timer expires. */
+#define EC_MSW_UC_TIMER_DROP_MASK 0x0F000000
+#define EC_MSW_UC_TIMER_DROP_SHIFT 24
+
+/**** mc register ****/
+/* Timer max value for waiting for a stream to be ready to accep ... */
+#define EC_MSW_MC_TIMER_MASK 0x0000FFFF
+#define EC_MSW_MC_TIMER_SHIFT 0
+/* Drop packet if target queue in UDMA is full. */
+#define EC_MSW_MC_Q_FULL_DROP_MASK 0x000F0000
+#define EC_MSW_MC_Q_FULL_DROP_SHIFT 16
+/* Drop packet if timer expires. */
+#define EC_MSW_MC_TIMER_DROP_MASK 0x0F000000
+#define EC_MSW_MC_TIMER_DROP_SHIFT 24
+
+/**** bc register ****/
+/* Timer max value for waiting for a stream to be ready to accep ... */
+#define EC_MSW_BC_TIMER_MASK 0x0000FFFF
+#define EC_MSW_BC_TIMER_SHIFT 0
+/* Drop packet if target queue in UDMA is full. */
+#define EC_MSW_BC_Q_FULL_DROP_MASK 0x000F0000
+#define EC_MSW_BC_Q_FULL_DROP_SHIFT 16
+/* Drop packet if timer expires. */
+#define EC_MSW_BC_TIMER_DROP_MASK 0x0F000000
+#define EC_MSW_BC_TIMER_DROP_SHIFT 24
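One fwd_mac ctrl word carries the whole Rx decision for a matching MAC address: drop, control-table command, target UDMA bitmap and queue, plus a valid flag. Composing an Rx entry value, as a sketch (illustrative helper, not part of the driver):

static inline uint32_t al_fwd_mac_rx_ctrl(uint32_t udma_mask, uint32_t qid)
{
	return ((udma_mask << EC_FWD_MAC_CTRL_RX_VAL_UDMA_SHIFT) &
		EC_FWD_MAC_CTRL_RX_VAL_UDMA_MASK) |
	       ((qid << EC_FWD_MAC_CTRL_RX_VAL_QID_SHIFT) &
		EC_FWD_MAC_CTRL_RX_VAL_QID_MASK) |
	       EC_FWD_MAC_CTRL_RX_VALID;	/* entry armed; drop bit left clear */
}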
+
+/**** in_cfg register ****/
+/* Swap input bytes order */
+#define EC_TSO_IN_CFG_SWAP_BYTES (1 << 0)
+/* Selects strict priority or round robin scheduling between GDM ... */
+#define EC_TSO_IN_CFG_SEL_SP_RR (1 << 1)
+/* Selects scheduler numbering direction */
+#define EC_TSO_IN_CFG_SEL_SCH_DIR (1 << 2)
+/* Minimum L2 packet size (not including FCS) */
+#define EC_TSO_IN_CFG_L2_MIN_SIZE_MASK 0x00007F00
+#define EC_TSO_IN_CFG_L2_MIN_SIZE_SHIFT 8
+/* Strict priority scheduler initial value */
+#define EC_TSO_IN_CFG_SP_INIT_VAL_MASK 0x000F0000
+#define EC_TSO_IN_CFG_SP_INIT_VAL_SHIFT 16
+
+/**** cache_table_addr register ****/
+/* Address for accessing the table */
+#define EC_TSO_CACHE_TABLE_ADDR_VAL_MASK 0x0000000F
+#define EC_TSO_CACHE_TABLE_ADDR_VAL_SHIFT 0
+
+/**** ctrl_first register ****/
+/* Data to be written into the control bits. */
+#define EC_TSO_CTRL_FIRST_DATA_MASK 0x000001FF
+#define EC_TSO_CTRL_FIRST_DATA_SHIFT 0
+/* Mask for control bits */
+#define EC_TSO_CTRL_FIRST_MASK_MASK 0x01FF0000
+#define EC_TSO_CTRL_FIRST_MASK_SHIFT 16
+
+/**** ctrl_middle register ****/
+/* Data to be written into the control bits. */
+#define EC_TSO_CTRL_MIDDLE_DATA_MASK 0x000001FF
+#define EC_TSO_CTRL_MIDDLE_DATA_SHIFT 0
+/* Mask for the control bits */
+#define EC_TSO_CTRL_MIDDLE_MASK_MASK 0x01FF0000
+#define EC_TSO_CTRL_MIDDLE_MASK_SHIFT 16
+
+/**** ctrl_last register ****/
+/* Data to be written into the control bits. */
+#define EC_TSO_CTRL_LAST_DATA_MASK 0x000001FF
+#define EC_TSO_CTRL_LAST_DATA_SHIFT 0
+/* Mask for the control bits */
+#define EC_TSO_CTRL_LAST_MASK_MASK 0x01FF0000
+#define EC_TSO_CTRL_LAST_MASK_SHIFT 16
+
+/**** cfg_add_0 register ****/
+/* MSS selection option:0 - MSS value is selected using MSS_sel ... */
+#define EC_TSO_CFG_ADD_0_MSS_SEL (1 << 0)
+
+/**** cfg_tunnel register ****/
+/* Enable TSO with tunnelling */
+#define EC_TSO_CFG_TUNNEL_EN_TUNNEL_TSO (1 << 0)
+/* Enable outer UDP checksum update */
+#define EC_TSO_CFG_TUNNEL_EN_UDP_CHKSUM (1 << 8)
+/* Enable outer UDP length update */
+#define EC_TSO_CFG_TUNNEL_EN_UDP_LEN (1 << 9)
+/* Enable outer IPv6 length update */
+#define EC_TSO_CFG_TUNNEL_EN_IPV6_PLEN (1 << 10)
+/* Enable outer IPv4 checksum update */
+#define EC_TSO_CFG_TUNNEL_EN_IPV4_CHKSUM (1 << 11)
+/* Enable outer IPv4 Identification update */
+#define EC_TSO_CFG_TUNNEL_EN_IPV4_IDEN (1 << 12)
+/* Enable outer IPv4 length update */
+#define EC_TSO_CFG_TUNNEL_EN_IPV4_TLEN (1 << 13)
+
+/**** mss register ****/
+/* MSS value */
+#define EC_TSO_SEL_MSS_VAL_MASK 0x000FFFFF
+#define EC_TSO_SEL_MSS_VAL_SHIFT 0
+
+/**** parse register ****/
+/* Max number of bus beats for parsing */
+#define EC_TPE_PARSE_MAX_BEATS_MASK 0x0000FFFF
+#define EC_TPE_PARSE_MAX_BEATS_SHIFT 0
+
+/**** vlan_data register ****/
+/* UDMA default VLAN 1 data */
+#define EC_TPM_UDMA_VLAN_DATA_DEF_1_MASK 0x0000FFFF
+#define EC_TPM_UDMA_VLAN_DATA_DEF_1_SHIFT 0
+/* UDMA default VLAN 2 data */
+#define EC_TPM_UDMA_VLAN_DATA_DEF_2_MASK 0xFFFF0000
+#define EC_TPM_UDMA_VLAN_DATA_DEF_2_SHIFT 16
+
+/**** mac_sa_2 register ****/
+/* MAC source address data [47:32] */
+#define EC_TPM_UDMA_MAC_SA_2_H_VAL_MASK 0x0000FFFF
+#define EC_TPM_UDMA_MAC_SA_2_H_VAL_SHIFT 0
+/* Drop indication for MAC SA spoofing: 0 - Don't drop */
+#define EC_TPM_UDMA_MAC_SA_2_DROP (1 << 16)
+/* Replace indication for MAC SA spoofing: 0 - Don't replace */
+#define EC_TPM_UDMA_MAC_SA_2_REPLACE (1 << 17)
+
+/**** etype register ****/
+/* Ether type value */
+#define EC_TPM_SEL_ETYPE_VAL_MASK 0x0000FFFF
+#define EC_TPM_SEL_ETYPE_VAL_SHIFT 0
+
+/**** tx_wr_fifo register ****/
+/* Max data beats that can be used in the Tx FIFO */
+#define EC_TFW_TX_WR_FIFO_DATA_TH_MASK 0x0000FFFF
+#define EC_TFW_TX_WR_FIFO_DATA_TH_SHIFT 0
+/*
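The ctrl_first/ctrl_middle/ctrl_last registers let the TSO engine rewrite the 9 stream control bits differently per generated segment: the data field in [8:0] supplies the new values and the mask in [24:16] selects which bits are overridden. A small composer, as a sketch (the layout is identical for all three registers, so the FIRST macros are reused):

/* Compose one TSO segment-control word (same layout for first/middle/last) */
static inline uint32_t al_tso_ctrl(uint32_t data, uint32_t mask)
{
	return ((data << EC_TSO_CTRL_FIRST_DATA_SHIFT) &
		EC_TSO_CTRL_FIRST_DATA_MASK) |
	       ((mask << EC_TSO_CTRL_FIRST_MASK_SHIFT) &
		EC_TSO_CTRL_FIRST_MASK_MASK);
}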
Max packets that can be stored in the Tx FIFO */ +#define EC_TFW_TX_WR_FIFO_INFO_TH_MASK 0xFFFF0000 +#define EC_TFW_TX_WR_FIFO_INFO_TH_SHIFT 16 + +/**** tx_vid_table_addr register ****/ +/* Address for accessing the table */ +#define EC_TFW_TX_VID_TABLE_ADDR_VAL_MASK 0x00000FFF +#define EC_TFW_TX_VID_TABLE_ADDR_VAL_SHIFT 0 + +/**** tx_vid_table_data register ****/ +/* Table data (valid only after configuring the table address re ... */ +#define EC_TFW_TX_VID_TABLE_DATA_VAL_MASK 0x0000001F +#define EC_TFW_TX_VID_TABLE_DATA_VAL_SHIFT 0 + +/**** tx_rd_fifo register ****/ +/* Read data threshold when cut through mode is enabled. */ +#define EC_TFW_TX_RD_FIFO_READ_TH_MASK 0x0000FFFF +#define EC_TFW_TX_RD_FIFO_READ_TH_SHIFT 0 +/* Enable cut through operation of the Tx FIFO. */ +#define EC_TFW_TX_RD_FIFO_EN_CUT_THROUGH (1 << 16) + +/**** tx_checksum register ****/ +/* Enable L3 checksum insertion. */ +#define EC_TFW_TX_CHECKSUM_L3_EN (1 << 0) +/* Enable L4 checksum insertion. */ +#define EC_TFW_TX_CHECKSUM_L4_EN (1 << 1) +/* Enable L4 checksum when L3 fragmentation is detected. */ +#define EC_TFW_TX_CHECKSUM_L4_FRAG_EN (1 << 2) + +/**** tx_gen register ****/ +/* Force forward of all Tx packets to MAC. */ +#define EC_TFW_TX_GEN_FWD_ALL_TO_MAC (1 << 0) +/* Select the Packet generator as the source of Tx packets0 - Tx ... */ +#define EC_TFW_TX_GEN_SELECT_PKT_GEN (1 << 1) + +/**** tx_spf register ****/ +/* Select the VID for spoofing check:[0] - Packet VID[1] - Forwa ... */ +#define EC_TFW_TX_SPF_VID_SEL (1 << 0) + +/**** data_fifo register ****/ +/* FIFO used value (number of entries) */ +#define EC_TFW_DATA_FIFO_USED_MASK 0x0000FFFF +#define EC_TFW_DATA_FIFO_USED_SHIFT 0 +/* FIFO FULL status */ +#define EC_TFW_DATA_FIFO_FULL (1 << 16) +/* FIFO EMPTY status */ +#define EC_TFW_DATA_FIFO_EMPTY (1 << 17) + +/**** ctrl_fifo register ****/ +/* FIFO used value (number of entries) */ +#define EC_TFW_CTRL_FIFO_USED_MASK 0x0000FFFF +#define EC_TFW_CTRL_FIFO_USED_SHIFT 0 +/* FIFO FULL status */ +#define EC_TFW_CTRL_FIFO_FULL (1 << 16) +/* FIFO EMPTY status */ +#define EC_TFW_CTRL_FIFO_EMPTY (1 << 17) + +/**** hdr_fifo register ****/ +/* FIFO used value (number of entries) */ +#define EC_TFW_HDR_FIFO_USED_MASK 0x0000FFFF +#define EC_TFW_HDR_FIFO_USED_SHIFT 0 +/* FIFO FULL status */ +#define EC_TFW_HDR_FIFO_FULL (1 << 16) +/* FIFO EMPTY status */ +#define EC_TFW_HDR_FIFO_EMPTY (1 << 17) + +/**** uc_udma register ****/ +/* Default UDMA bitmap +(MSB represents physical port) */ +#define EC_TFW_UDMA_UC_UDMA_DEF_MASK 0x0000001F +#define EC_TFW_UDMA_UC_UDMA_DEF_SHIFT 0 + +/**** mc_udma register ****/ +/* Default UDMA bitmap (MSB represents physical port.) */ +#define EC_TFW_UDMA_MC_UDMA_DEF_MASK 0x0000001F +#define EC_TFW_UDMA_MC_UDMA_DEF_SHIFT 0 + +/**** bc_udma register ****/ +/* Default UDMA bitmap (MSB represents physical port.) 
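Checksum insertion on Tx is gated globally here, in addition to any per-descriptor control. A typical offload setup enables L3 and L4 insertion but leaves the fragment case disabled, since an L4 checksum cannot be computed over a single IP fragment. A sketch (tx_checksum member name assumed from the register heading above):

static void al_ec_tx_csum_enable(struct al_ec_regs __iomem *ec)
{
	/* EC_TFW_TX_CHECKSUM_L4_FRAG_EN deliberately left clear */
	writel(EC_TFW_TX_CHECKSUM_L3_EN | EC_TFW_TX_CHECKSUM_L4_EN,
	       &ec->tfw.tx_checksum);
}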
*/ +#define EC_TFW_UDMA_BC_UDMA_DEF_MASK 0x0000001F +#define EC_TFW_UDMA_BC_UDMA_DEF_SHIFT 0 + +/**** spf_cmd register ****/ +/* Command for the VLAN spoofing00 – Ignore mismatch */ +#define EC_TFW_UDMA_SPF_CMD_VID_MASK 0x00000003 +#define EC_TFW_UDMA_SPF_CMD_VID_SHIFT 0 +/* Command for VLAN spoofing 00 - Ignore mismatch */ +#define EC_TFW_UDMA_SPF_CMD_MAC_MASK 0x0000000C +#define EC_TFW_UDMA_SPF_CMD_MAC_SHIFT 2 + +/**** fwd_dec register ****/ +/* Forwarding decision control:[0] – Enable internal switch */ +#define EC_TFW_UDMA_FWD_DEC_CTRL_MASK 0x000003FF +#define EC_TFW_UDMA_FWD_DEC_CTRL_SHIFT 0 + +/**** tx_cfg register ****/ +/* Swap output byte order */ +#define EC_TMI_TX_CFG_SWAP_BYTES (1 << 0) +/* Enable forwarding to the Rx data path. */ +#define EC_TMI_TX_CFG_EN_FWD_TO_RX (1 << 1) +/* Force forwarding all packets to the MAC. */ +#define EC_TMI_TX_CFG_FORCE_FWD_MAC (1 << 2) +/* Force forwarding all packets to the MAC. */ +#define EC_TMI_TX_CFG_FORCE_FWD_RX (1 << 3) +/* Force loop back operation */ +#define EC_TMI_TX_CFG_FORCE_LB (1 << 4) + +/**** ec_pause register ****/ +/* Mask of pause_on [7:0] */ +#define EC_EFC_EC_PAUSE_MASK_MAC_MASK 0x000000FF +#define EC_EFC_EC_PAUSE_MASK_MAC_SHIFT 0 +/* Mask of GPIO input [7:0] */ +#define EC_EFC_EC_PAUSE_MASK_GPIO_MASK 0x0000FF00 +#define EC_EFC_EC_PAUSE_MASK_GPIO_SHIFT 8 + +/**** ec_xoff register ****/ +/* Mask 1 for XOFF [7:0] +Mask 1 for Almost Full indication, */ +#define EC_EFC_EC_XOFF_MASK_1_MASK 0x000000FF +#define EC_EFC_EC_XOFF_MASK_1_SHIFT 0 +/* Mask 2 for XOFF [7:0] Mask 2 for sampled Almost Full indicati ... */ +#define EC_EFC_EC_XOFF_MASK_2_MASK 0x0000FF00 +#define EC_EFC_EC_XOFF_MASK_2_SHIFT 8 + +/**** xon register ****/ +/* Mask 1 for generating XON pulse, masking XOFF [0] */ +#define EC_EFC_XON_MASK_1 (1 << 0) +/* Mask 2 for generating XON pulse, masking Almost Full indicati ... 
*/ +#define EC_EFC_XON_MASK_2 (1 << 1) + +/**** gpio register ****/ +/* Mask for generating GPIO output XOFF indication from XOFF[0] */ +#define EC_EFC_GPIO_MASK_1 (1 << 0) + +/**** rx_fifo_af register ****/ +/* Threshold */ +#define EC_EFC_RX_FIFO_AF_TH_MASK 0x0000FFFF +#define EC_EFC_RX_FIFO_AF_TH_SHIFT 0 + +/**** rx_fifo_hyst register ****/ +/* Threshold low */ +#define EC_EFC_RX_FIFO_HYST_TH_LOW_MASK 0x0000FFFF +#define EC_EFC_RX_FIFO_HYST_TH_LOW_SHIFT 0 +/* Threshold high */ +#define EC_EFC_RX_FIFO_HYST_TH_HIGH_MASK 0xFFFF0000 +#define EC_EFC_RX_FIFO_HYST_TH_HIGH_SHIFT 16 + +/**** stat register ****/ +/* 10G MAC PFC mode, input from the 10 MAC */ +#define EC_EFC_STAT_PFC_MODE (1 << 0) + +/**** ec_pfc register ****/ +/* Force PFC flow control */ +#define EC_EFC_EC_PFC_FORCE_MASK 0x000000FF +#define EC_EFC_EC_PFC_FORCE_SHIFT 0 + +/**** q_pause_0 register ****/ +/* [i] – Mask for Q[i] */ +#define EC_FC_UDMA_Q_PAUSE_0_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_PAUSE_0_MASK_SHIFT 0 + +/**** q_pause_1 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_PAUSE_1_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_PAUSE_1_MASK_SHIFT 0 + +/**** q_pause_2 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_PAUSE_2_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_PAUSE_2_MASK_SHIFT 0 + +/**** q_pause_3 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_PAUSE_3_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_PAUSE_3_MASK_SHIFT 0 + +/**** q_pause_4 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_PAUSE_4_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_PAUSE_4_MASK_SHIFT 0 + +/**** q_pause_5 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_PAUSE_5_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_PAUSE_5_MASK_SHIFT 0 + +/**** q_pause_6 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_PAUSE_6_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_PAUSE_6_MASK_SHIFT 0 + +/**** q_pause_7 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_PAUSE_7_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_PAUSE_7_MASK_SHIFT 0 + +/**** q_gpio_0 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_GPIO_0_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_GPIO_0_MASK_SHIFT 0 + +/**** q_gpio_1 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_GPIO_1_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_GPIO_1_MASK_SHIFT 0 + +/**** q_gpio_2 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_GPIO_2_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_GPIO_2_MASK_SHIFT 0 + +/**** q_gpio_3 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_GPIO_3_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_GPIO_3_MASK_SHIFT 0 + +/**** q_gpio_4 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_GPIO_4_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_GPIO_4_MASK_SHIFT 0 + +/**** q_gpio_5 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_GPIO_5_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_GPIO_5_MASK_SHIFT 0 + +/**** q_gpio_6 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_GPIO_6_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_GPIO_6_MASK_SHIFT 0 + +/**** q_gpio_7 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_GPIO_7_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_GPIO_7_MASK_SHIFT 0 + +/**** s_pause register ****/ +/* Mask of pause_on [7:0] */ +#define EC_FC_UDMA_S_PAUSE_MASK_MAC_MASK 0x000000FF +#define EC_FC_UDMA_S_PAUSE_MASK_MAC_SHIFT 0 +/* Mask of GPIO input [7:0] */ +#define EC_FC_UDMA_S_PAUSE_MASK_GPIO_MASK 0x0000FF00 +#define 
EC_FC_UDMA_S_PAUSE_MASK_GPIO_SHIFT 8 + +/**** q_xoff_0 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_XOFF_0_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_XOFF_0_MASK_SHIFT 0 + +/**** q_xoff_1 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_XOFF_1_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_XOFF_1_MASK_SHIFT 0 + +/**** q_xoff_2 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_XOFF_2_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_XOFF_2_MASK_SHIFT 0 + +/**** q_xoff_3 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_XOFF_3_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_XOFF_3_MASK_SHIFT 0 + +/**** q_xoff_4 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_XOFF_4_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_XOFF_4_MASK_SHIFT 0 + +/**** q_xoff_5 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_XOFF_5_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_XOFF_5_MASK_SHIFT 0 + +/**** q_xoff_6 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_XOFF_6_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_XOFF_6_MASK_SHIFT 0 + +/**** q_xoff_7 register ****/ +/* [i] - Mask for Q[i] */ +#define EC_FC_UDMA_Q_XOFF_7_MASK_MASK 0x0000000F +#define EC_FC_UDMA_Q_XOFF_7_MASK_SHIFT 0 + +/**** cfg_e register ****/ +/* Use MAC Tx FIFO empty status for EEE control. */ +#define EC_EEE_CFG_E_USE_MAC_TX_FIFO (1 << 0) +/* Use MAC Rx FIFO empty status for EEE control. */ +#define EC_EEE_CFG_E_USE_MAC_RX_FIFO (1 << 1) +/* Use Ethernet controller Tx FIFO empty status for EEE control */ +#define EC_EEE_CFG_E_USE_EC_TX_FIFO (1 << 2) +/* Use Ethernet controller Rx FIFO empty status for EEE control */ +#define EC_EEE_CFG_E_USE_EC_RX_FIFO (1 << 3) +/* Enable Low power signalling. */ +#define EC_EEE_CFG_E_ENABLE (1 << 4) +/* Mask output to MAC. */ +#define EC_EEE_CFG_E_MASK_MAC_EEE (1 << 8) +/* Mask output to stop MAC interface. */ +#define EC_EEE_CFG_E_MASK_EC_TMI_STOP (1 << 9) + +/**** stat_eee register ****/ +/* EEE state */ +#define EC_EEE_STAT_EEE_STATE_MASK 0x0000000F +#define EC_EEE_STAT_EEE_STATE_SHIFT 0 +/* EEE detected */ +#define EC_EEE_STAT_EEE_DET (1 << 4) + +/**** p_parse_cfg register ****/ +/* MAX number of beats for packet parsing */ +#define EC_MSP_P_PARSE_CFG_MAX_BEATS_MASK 0x000000FF +#define EC_MSP_P_PARSE_CFG_MAX_BEATS_SHIFT 0 +/* MAX number of parsing iterations for packet parsing */ +#define EC_MSP_P_PARSE_CFG_MAX_ITER_MASK 0x0000FF00 +#define EC_MSP_P_PARSE_CFG_MAX_ITER_SHIFT 8 + +/**** p_act_table_addr register ****/ +/* Address for accessing the table */ +#define EC_MSP_P_ACT_TABLE_ADDR_VAL_MASK 0x0000001F +#define EC_MSP_P_ACT_TABLE_ADDR_VAL_SHIFT 0 + +/**** p_act_table_data_1 register ****/ +/* Table data[5:0] - Offset to next protocol [bytes] [6] - Next ... */ +#define EC_MSP_P_ACT_TABLE_DATA_1_VAL_MASK 0x03FFFFFF +#define EC_MSP_P_ACT_TABLE_DATA_1_VAL_SHIFT 0 + +/**** p_act_table_data_2 register ****/ +/* Table data [8:0] - Offset to data in the packet [bits][17:9] ... */ +#define EC_MSP_P_ACT_TABLE_DATA_2_VAL_MASK 0x1FFFFFFF +#define EC_MSP_P_ACT_TABLE_DATA_2_VAL_SHIFT 0 + +/**** p_act_table_data_3 register ****/ +/* Table data [8:0] - Offset to data in the packet [bits] [17 ... */ +#define EC_MSP_P_ACT_TABLE_DATA_3_VAL_MASK 0x1FFFFFFF +#define EC_MSP_P_ACT_TABLE_DATA_3_VAL_SHIFT 0 + +/**** p_act_table_data_4 register ****/ +/* Table data [7:0] - Offset to the header length location in th ... 
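EEE entry is qualified by FIFO-empty indications from both the MAC and the Ethernet controller, so a sensible enable sets all four qualifiers together; progress can then be watched in stat_eee. A sketch (cfg_e/stat_eee member names assumed from the register headings):

static unsigned int al_ec_eee_enable(struct al_ec_regs __iomem *ec)
{
	writel(EC_EEE_CFG_E_USE_MAC_TX_FIFO | EC_EEE_CFG_E_USE_MAC_RX_FIFO |
	       EC_EEE_CFG_E_USE_EC_TX_FIFO | EC_EEE_CFG_E_USE_EC_RX_FIFO |
	       EC_EEE_CFG_E_ENABLE, &ec->eee.cfg_e);

	/* current low-power state machine value */
	return readl(&ec->eee.stat_eee) & EC_EEE_STAT_EEE_STATE_MASK;
}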
*/ +#define EC_MSP_P_ACT_TABLE_DATA_4_VAL_MASK 0x0FFFFFFF +#define EC_MSP_P_ACT_TABLE_DATA_4_VAL_SHIFT 0 + +/**** p_act_table_data_6 register ****/ +/* Table data [0] - Wr header length [10:1] - Write header lengt ... */ +#define EC_MSP_P_ACT_TABLE_DATA_6_VAL_MASK 0x007FFFFF +#define EC_MSP_P_ACT_TABLE_DATA_6_VAL_SHIFT 0 + +/**** p_res_in register ****/ +/* Selector for input parse_en 0 - Input vector 1 - Default valu ... */ +#define EC_MSP_P_RES_IN_SEL_PARSE_EN (1 << 0) +/* Selector for input protocol_index 0 - Input vector 1 - Defa ... */ +#define EC_MSP_P_RES_IN_SEL_PROT_INDEX (1 << 1) +/* Selector for input hdr_offset 0 - Input vector 1 - Default v ... */ +#define EC_MSP_P_RES_IN_SEL_HDR_OFFSET (1 << 2) + +/**** h_hdr_len register ****/ +/* Value for selecting table 1 */ +#define EC_MSP_P_H_HDR_LEN_TABLE_1_MASK 0x000000FF +#define EC_MSP_P_H_HDR_LEN_TABLE_1_SHIFT 0 +/* Value for selecting table 2 */ +#define EC_MSP_P_H_HDR_LEN_TABLE_2_MASK 0x00FF0000 +#define EC_MSP_P_H_HDR_LEN_TABLE_2_SHIFT 16 + +/**** p_comp_data register ****/ +/* Data 1 for comparison */ +#define EC_MSP_C_P_COMP_DATA_DATA_1_MASK 0x0000FFFF +#define EC_MSP_C_P_COMP_DATA_DATA_1_SHIFT 0 +/* Data 2 for comparison +[18:16] - Stage +[24:19] - Branch ID */ +#define EC_MSP_C_P_COMP_DATA_DATA_2_MASK 0x01FF0000 +#define EC_MSP_C_P_COMP_DATA_DATA_2_SHIFT 16 + +/**** p_comp_mask register ****/ +/* Data 1 for comparison */ +#define EC_MSP_C_P_COMP_MASK_DATA_1_MASK 0x0000FFFF +#define EC_MSP_C_P_COMP_MASK_DATA_1_SHIFT 0 +/* Data 2 for comparison +[18:16] - Stage +[24:19] - Branch ID */ +#define EC_MSP_C_P_COMP_MASK_DATA_2_MASK 0x01FF0000 +#define EC_MSP_C_P_COMP_MASK_DATA_2_SHIFT 16 + +/**** p_comp_ctrl register ****/ +/* Output result value */ +#define EC_MSP_C_P_COMP_CTRL_RES_MASK 0x0000001F +#define EC_MSP_C_P_COMP_CTRL_RES_SHIFT 0 +/* Compare command for the data_1 field 00 - Compare 01 - <= 10 ... */ +#define EC_MSP_C_P_COMP_CTRL_CMD_1_MASK 0x00030000 +#define EC_MSP_C_P_COMP_CTRL_CMD_1_SHIFT 16 +/* Compare command for the data_2 field 00 - Compare 01 - <= 10 ... */ +#define EC_MSP_C_P_COMP_CTRL_CMD_2_MASK 0x000C0000 +#define EC_MSP_C_P_COMP_CTRL_CMD_2_SHIFT 18 +/* Entry is valid */ +#define EC_MSP_C_P_COMP_CTRL_VALID (1 << 31) + +/**** pkt_cfg register ****/ +/* Number of bytes in RoCE packet LRH header */ +#define EC_CRCE_PKT_CFG_ROCE_HEAD_SIZE_MASK 0x000000FF +#define EC_CRCE_PKT_CFG_ROCE_HEAD_SIZE_SHIFT 0 +/* Number of bytes in FCoE packet header */ +#define EC_CRCE_PKT_CFG_FCOE_HEAD_SIZE_MASK 0x0000FF00 +#define EC_CRCE_PKT_CFG_FCOE_HEAD_SIZE_SHIFT 8 +/* Number of bytes to ignore in the end of RoCE packet */ +#define EC_CRCE_PKT_CFG_ROCE_TRAIL_SIZE_MASK 0x000F0000 +#define EC_CRCE_PKT_CFG_ROCE_TRAIL_SIZE_SHIFT 16 +/* Number of bytes to ignore in the end of FCoE packet */ +#define EC_CRCE_PKT_CFG_FCOE_TRAIL_SIZE_MASK 0x00F00000 +#define EC_CRCE_PKT_CFG_FCOE_TRAIL_SIZE_SHIFT 20 +/* Select the header that will be used for the CRC engine when a ... */ +#define EC_CRCE_PKT_CFG_HDR_SEL (1 << 24) + +/**** swap_en register ****/ +/* Enable byte swap on input data of RoCE packet */ +#define EC_CRCE_SWAP_EN_ROCE_DATA_BYTE_SWAP (1 << 0) +/* Enable bit swap (within each byte) on input data of RoCE pack ... */ +#define EC_CRCE_SWAP_EN_ROCE_DATA_BIT_SWAP (1 << 1) +/* Enable byte swap on crc32 output of RoCE packet */ +#define EC_CRCE_SWAP_EN_ROCE_CRC32_BYTE_SWAP (1 << 2) +/* Enable bit swap (within each byte) on crc32 output of RoCE pa ... 
*/ +#define EC_CRCE_SWAP_EN_ROCE_CRC32_BIT_SWAP (1 << 3) +/* Enable bit complement on crc32 output of RoCE packet */ +#define EC_CRCE_SWAP_EN_ROCE_CRC32_BIT_COMP (1 << 4) +/* Enable byte swap on input data of FCoE packet */ +#define EC_CRCE_SWAP_EN_FCOE_DATA_BYTE_SWAP (1 << 5) +/* Enable bit swap (within each byte) on input data of FCoE pack ... */ +#define EC_CRCE_SWAP_EN_FCOE_DATA_BIT_SWAP (1 << 6) +/* Enable byte swap on crc32 output of FCoE packet */ +#define EC_CRCE_SWAP_EN_FCOE_CRC32_BYTE_SWAP (1 << 7) +/* Enable bit swap (within each byte) on crc32 output of FCoE pa ... */ +#define EC_CRCE_SWAP_EN_FCOE_CRC32_BIT_SWAP (1 << 8) +/* Enable bit complement on crc32 output of FCoE packet */ +#define EC_CRCE_SWAP_EN_FCOE_CRC32_BIT_COMP (1 << 9) + +/**** gen_cfg register ****/ +/* Default result for undetected protocols. */ +#define EC_CRCE_GEN_CFG_RES_DEF (1 << 0) +/* Enable CRC (RoCE/FCoE) insertion in the Tx data path(override ... */ +#define EC_CRCE_GEN_CFG_EN_TX_CRC_INS (1 << 4) +/* Enable RoCE insertion in the Tx data path */ +#define EC_CRCE_GEN_CFG_EN_TX_ROCE_INS (1 << 5) +/* Enable FcoE insertion in the Tx data path */ +#define EC_CRCE_GEN_CFG_EN_TX_FCOE_INS (1 << 6) +/* Enable CRC (RoCE/FCoE) insertion in the Tx data path (overrid ... */ +#define EC_CRCE_GEN_CFG_EN_TX_CKS_DIS (1 << 8) +/* Disable checksum insertion when RoCE packet is detected */ +#define EC_CRCE_GEN_CFG_EN_TX_ROCE_CKS_DIS (1 << 9) +/* Disable checksum insertion when FcoE packet is detected */ +#define EC_CRCE_GEN_CFG_EN_TX_FCOE_CKS_DIS (1 << 10) +/* TX L3 offset selection0 - Standard L3 offset selection from t ... */ +#define EC_CRCE_GEN_CFG_TX_OFFSET_SELECTION (1 << 12) + +/**** wol_en register ****/ +/* Interrupt enable WoL MAC DA Unicast detected packet */ +#define EC_WOL_WOL_EN_INTRPT_EN_UNICAST (1 << 0) +/* Interrupt enable WoL L2 Multicast detected packet */ +#define EC_WOL_WOL_EN_INTRPT_EN_MULTICAST (1 << 1) +/* Interrupt enable WoL L2 Broadcast detected packet */ +#define EC_WOL_WOL_EN_INTRPT_EN_BROADCAST (1 << 2) +/* Interrupt enable WoL IPv4 detected packet */ +#define EC_WOL_WOL_EN_INTRPT_EN_IPV4 (1 << 3) +/* Interrupt enable WoL IPv6 detected packet */ +#define EC_WOL_WOL_EN_INTRPT_EN_IPV6 (1 << 4) +/* Interrupt enable WoL EtherType+MAC DA detected packet */ +#define EC_WOL_WOL_EN_INTRPT_EN_ETHERTYPE_DA (1 << 5) +/* Interrupt enable WoL EtherType+L2 Broadcast detected packet */ +#define EC_WOL_WOL_EN_INTRPT_EN_ETHERTYPE_BC (1 << 6) +/* Interrupt enable WoL parser detected packet */ +#define EC_WOL_WOL_EN_INTRPT_EN_PARSER (1 << 7) +/* Interrupt enable WoL magic detected packet */ +#define EC_WOL_WOL_EN_INTRPT_EN_MAGIC (1 << 8) +/* Interrupt enable WoL magic+password detected packet */ +#define EC_WOL_WOL_EN_INTRPT_EN_MAGIC_PSWD (1 << 9) +/* Forward enable WoL MAC DA Unicast detected packet */ +#define EC_WOL_WOL_EN_FWRD_EN_UNICAST (1 << 16) +/* Forward enable WoL L2 Multicast detected packet */ +#define EC_WOL_WOL_EN_FWRD_EN_MULTICAST (1 << 17) +/* Forward enable WoL L2 Broadcast detected packet */ +#define EC_WOL_WOL_EN_FWRD_EN_BROADCAST (1 << 18) +/* Forward enable WoL IPv4 detected packet */ +#define EC_WOL_WOL_EN_FWRD_EN_IPV4 (1 << 19) +/* Forward enable WoL IPv6 detected packet */ +#define EC_WOL_WOL_EN_FWRD_EN_IPV6 (1 << 20) +/* Forward enable WoL EtherType+MAC DA detected packet */ +#define EC_WOL_WOL_EN_FWRD_EN_ETHERTYPE_DA (1 << 21) +/* Forward enable WoL EtherType+L2 Broadcast detected packet */ +#define EC_WOL_WOL_EN_FWRD_EN_ETHERTYPE_BC (1 << 22) +/* Forward enable WoL parser detected 
packet */ +#define EC_WOL_WOL_EN_FWRD_EN_PARSER (1 << 23) + +/**** magic_pswd_h register ****/ +/* Password for magic_password packet detection - bits 47:32 */ +#define EC_WOL_MAGIC_PSWD_H_VAL_MASK 0x0000FFFF +#define EC_WOL_MAGIC_PSWD_H_VAL_SHIFT 0 + +/**** ethertype register ****/ +/* Configured EtherType 1 for WoL EtherType_da/EtherType_bc pack ... */ +#define EC_WOL_ETHERTYPE_VAL_1_MASK 0x0000FFFF +#define EC_WOL_ETHERTYPE_VAL_1_SHIFT 0 +/* Configured EtherType 2 for WoL EtherType_da/EtherType_bc pack ... */ +#define EC_WOL_ETHERTYPE_VAL_2_MASK 0xFFFF0000 +#define EC_WOL_ETHERTYPE_VAL_2_SHIFT 16 + +#define EC_PTH_SYSTEM_TIME_SUBSECONDS_LSB_VAL_MASK 0xFFFFC000 +#define EC_PTH_SYSTEM_TIME_SUBSECONDS_LSB_VAL_SHIFT 14 + +#define EC_PTH_CLOCK_PERIOD_LSB_VAL_MASK 0xFFFFC000 +#define EC_PTH_CLOCK_PERIOD_LSB_VAL_SHIFT 14 + +/**** int_update_ctrl register ****/ +/* This field chooses between two methods for SW to update the s ... */ +#define EC_PTH_INT_UPDATE_CTRL_UPDATE_TRIG (1 << 0) +/* 3'b000 - Set system time according to the value in {int_updat ... */ +#define EC_PTH_INT_UPDATE_CTRL_UPDATE_METHOD_MASK 0x0000000E +#define EC_PTH_INT_UPDATE_CTRL_UPDATE_METHOD_SHIFT 1 +/* 1'b1 - Next update writes to system_time_subseconds1'b0 - Nex ... */ +#define EC_PTH_INT_UPDATE_CTRL_SUBSECOND_MASK (1 << 4) +/* 1'b1 - Next update writes to system_time_seconds1'b0 - Next u ... */ +#define EC_PTH_INT_UPDATE_CTRL_SECOND_MASK (1 << 5) +/* Enabling / disabling the internal ingress trigger (ingress_tr ... */ +#define EC_PTH_INT_UPDATE_CTRL_INT_TRIG_EN (1 << 16) +/* Determines if internal ingress trigger (ingress_trigger #0) s ... */ +#define EC_PTH_INT_UPDATE_CTRL_PULSE_LEVEL_N (1 << 17) +/* Internal ingress trigger polarity (ingress_trigger #0)1'b0 - ... */ +#define EC_PTH_INT_UPDATE_CTRL_POLARITY (1 << 18) + +/**** int_update_subseconds_lsb register ****/ + +#define EC_PTH_INT_UPDATE_SUBSECONDS_LSB_RESERVED_13_0_MASK 0x00003FFF +#define EC_PTH_INT_UPDATE_SUBSECONDS_LSB_RESERVED_13_0_SHIFT 0 + +#define EC_PTH_INT_UPDATE_SUBSECONDS_LSB_VAL_MASK 0xFFFFC000 +#define EC_PTH_INT_UPDATE_SUBSECONDS_LSB_VAL_SHIFT 14 +/* 3'b000 - Set system time according to the value in {int_updat ... */ +#define EC_PTH_EXT_UPDATE_CTRL_UPDATE_METHOD_MASK 0x0000000E +#define EC_PTH_EXT_UPDATE_CTRL_UPDATE_METHOD_SHIFT 1 +/* 1'b1 - next update writes to system_time_subseconds1'b0 - nex ... */ +#define EC_PTH_EXT_UPDATE_CTRL_SUBSECOND_MASK (1 << 4) +/* 1'b1 - Next update writes to system_time_seconds1'b0 - Next u ... */ +#define EC_PTH_EXT_UPDATE_CTRL_SECOND_MASK (1 << 5) +/* Enabling / disabling the external ingress triggers (ingress_t ... */ +#define EC_PTH_EXT_UPDATE_CTRL_EXT_TRIG_EN_MASK 0x00001F00 +#define EC_PTH_EXT_UPDATE_CTRL_EXT_TRIG_EN_SHIFT 8 +/* Determines if external ingress triggers (ingress_triggers #1- ... */ +#define EC_PTH_EXT_UPDATE_CTRL_PULSE_LEVEL_N_MASK 0x001F0000 +#define EC_PTH_EXT_UPDATE_CTRL_PULSE_LEVEL_N_SHIFT 16 +/* bit-field configurations of external ingress trigger polarity ... 
*/ +#define EC_PTH_EXT_UPDATE_CTRL_POLARITY_MASK 0x1F000000 +#define EC_PTH_EXT_UPDATE_CTRL_POLARITY_SHIFT 24 + +/**** ext_update_subseconds_lsb register ****/ + +#define EC_PTH_EXT_UPDATE_SUBSECONDS_LSB_RESERVED_13_0_MASK 0x00003FFF +#define EC_PTH_EXT_UPDATE_SUBSECONDS_LSB_RESERVED_13_0_SHIFT 0 + +#define EC_PTH_EXT_UPDATE_SUBSECONDS_LSB_VAL_MASK 0xFFFFC000 +#define EC_PTH_EXT_UPDATE_SUBSECONDS_LSB_VAL_SHIFT 14 + +#define EC_PTH_READ_COMPENSATION_SUBSECONDS_LSB_VAL_MASK 0xFFFFC000 +#define EC_PTH_READ_COMPENSATION_SUBSECONDS_LSB_VAL_SHIFT 14 + +#define EC_PTH_INT_WRITE_COMPENSATION_SUBSECONDS_LSB_VAL_MASK 0xFFFFC000 +#define EC_PTH_INT_WRITE_COMPENSATION_SUBSECONDS_LSB_VAL_SHIFT 14 + +#define EC_PTH_EXT_WRITE_COMPENSATION_SUBSECONDS_LSB_VAL_MASK 0xFFFFC000 +#define EC_PTH_EXT_WRITE_COMPENSATION_SUBSECONDS_LSB_VAL_SHIFT 14 + +#define EC_PTH_SYNC_COMPENSATION_SUBSECONDS_LSB_VAL_MASK 0xFFFFC000 +#define EC_PTH_SYNC_COMPENSATION_SUBSECONDS_LSB_VAL_SHIFT 14 + +/**** trigger_ctrl register ****/ +/* Enabling / disabling the egress trigger1'b1 - Enabled1'b0 - D ... */ +#define EC_PTH_EGRESS_TRIGGER_CTRL_EN (1 << 0) +/* Configuration that determines if the egress trigger is a peri ... */ +#define EC_PTH_EGRESS_TRIGGER_CTRL_PERIODIC (1 << 1) +/* Configuration of egress trigger polarity */ +#define EC_PTH_EGRESS_TRIGGER_CTRL_POLARITY (1 << 2) +/* If the pulse is marked as periodic (see periodic field), this ... */ +#define EC_PTH_EGRESS_TRIGGER_CTRL_PERIOD_SUBSEC_MASK 0x00FFFFF0 +#define EC_PTH_EGRESS_TRIGGER_CTRL_PERIOD_SUBSEC_SHIFT 4 +/* If the pulse is marked as periodic (see periodic field), this ... */ +#define EC_PTH_EGRESS_TRIGGER_CTRL_PERIOD_SEC_MASK 0xFF000000 +#define EC_PTH_EGRESS_TRIGGER_CTRL_PERIOD_SEC_SHIFT 24 + +/**** trigger_subseconds_lsb register ****/ + +#define EC_PTH_EGRESS_TRIGGER_SUBSECONDS_LSB_RESERVED_13_0_MASK 0x00003FFF +#define EC_PTH_EGRESS_TRIGGER_SUBSECONDS_LSB_RESERVED_13_0_SHIFT 0 + +#define EC_PTH_EGRESS_TRIGGER_SUBSECONDS_LSB_VAL_MASK 0xFFFFC000 +#define EC_PTH_EGRESS_TRIGGER_SUBSECONDS_LSB_VAL_SHIFT 14 + +/**** pulse_width_subseconds_lsb register ****/ + +#define EC_PTH_EGRESS_PULSE_WIDTH_SUBSECONDS_LSB_RESERVED_13_0_MASK 0x00003FFF +#define EC_PTH_EGRESS_PULSE_WIDTH_SUBSECONDS_LSB_RESERVED_13_0_SHIFT 0 + +#define EC_PTH_EGRESS_PULSE_WIDTH_SUBSECONDS_LSB_VAL_MASK 0xFFFFC000 +#define EC_PTH_EGRESS_PULSE_WIDTH_SUBSECONDS_LSB_VAL_SHIFT 14 + +/**** qual register ****/ + +#define EC_PTH_DB_QUAL_TS_VALID (1 << 0) + +#define EC_PTH_DB_QUAL_RESERVED_31_1_MASK 0xFFFFFFFE +#define EC_PTH_DB_QUAL_RESERVED_31_1_SHIFT 1 + +/**** roce_gen_cfg_1 register ****/ +/* Ignore number of segments check for Middle packets */ +#define EC_ROCE_ROCE_GEN_CFG_1_NUM_SEG_MIDDLE_IGNORE (1 << 0) +/* Ignore number of segments check for Last packets */ +#define EC_ROCE_ROCE_GEN_CFG_1_NUM_SEG_LAST_IGNORE (1 << 1) +/* Ignore TVER field check */ +#define EC_ROCE_ROCE_GEN_CFG_1_IGNORE_TVER (1 << 4) +/* Ignore NXTHDR field check */ +#define EC_ROCE_ROCE_GEN_CFG_1_IGNORE_NXTHDR (1 << 5) +/* Ignore IPVER field check */ +#define EC_ROCE_ROCE_GEN_CFG_1_IGNORE_IPVER (1 << 6) +/* Ignore ROCE L3 protocol index check */ +#define EC_ROCE_ROCE_GEN_CFG_1_IGNORE_ROCE_L3_INDEX_1_MASK 0x00000700 +#define EC_ROCE_ROCE_GEN_CFG_1_IGNORE_ROCE_L3_INDEX_1_SHIFT 8 +/* Ignore ROCE L3 protocol index check */ +#define EC_ROCE_ROCE_GEN_CFG_1_IGNORE_ROCE_L3_INDEX_2_MASK 0x00007000 +#define EC_ROCE_ROCE_GEN_CFG_1_IGNORE_ROCE_L3_INDEX_2_SHIFT 12 +/* Ignore ROCE L4 protocol index check */ +#define 
EC_ROCE_ROCE_GEN_CFG_1_IGNORE_ROCE_L4_INDEX_MASK 0x00070000
+#define EC_ROCE_ROCE_GEN_CFG_1_IGNORE_ROCE_L4_INDEX_SHIFT 16
+/* Ignore ROCE NXTHDR check */
+#define EC_ROCE_ROCE_GEN_CFG_1_IGNORE_ROCE_NXTHDR_MASK 0x00700000
+#define EC_ROCE_ROCE_GEN_CFG_1_IGNORE_ROCE_NXTHDR_SHIFT 20
+/* Enable each one of the 3 RoCE packet types */
+#define EC_ROCE_ROCE_GEN_CFG_1_EN_ROCE_PKT_TYPE_MASK 0x07000000
+#define EC_ROCE_ROCE_GEN_CFG_1_EN_ROCE_PKT_TYPE_SHIFT 24
+/* Select the header that will be used for the RoCE acceleration ... */
+#define EC_ROCE_ROCE_GEN_CFG_1_HDR_SEL (1 << 28)
+
+/**** rr_err register ****/
+/* Mask error for FSM state transition to IDLE */
+#define EC_ROCE_RR_ERR_MASK_MASK 0x003FFFFF
+#define EC_ROCE_RR_ERR_MASK_SHIFT 0
+
+/**** rr_qual register ****/
+/* Mask error for packet qualification */
+#define EC_ROCE_RR_QUAL_MASK_MASK 0x003FFFFF
+#define EC_ROCE_RR_QUAL_MASK_SHIFT 0
+
+/**** rr_err_cnt register ****/
+/* Mask error for error counter */
+#define EC_ROCE_RR_ERR_CNT_MASK_MASK 0x003FFFFF
+#define EC_ROCE_RR_ERR_CNT_MASK_SHIFT 0
+
+/**** rr_err_int register ****/
+/* Mask error for interrupt generation */
+#define EC_ROCE_RR_ERR_INT_MASK_MASK 0x003FFFFF
+#define EC_ROCE_RR_ERR_INT_MASK_SHIFT 0
+
+/**** roce_gen_en_1 register ****/
+/* Enable READ RESPONSE acceleration */
+#define EC_ROCE_ROCE_GEN_EN_1_EN_RR (1 << 0)
+/* Enable WRITE REQUEST acceleration */
+#define EC_ROCE_ROCE_GEN_EN_1_EN_WR (1 << 1)
+/* Enable UDMA selection by RoCE engine */
+#define EC_ROCE_ROCE_GEN_EN_1_EN_UDMA_SEL (1 << 4)
+/* Enable QUEUE selection by RoCE engine */
+#define EC_ROCE_ROCE_GEN_EN_1_EN_QUEUE_SEL (1 << 5)
+/* Enable header split */
+#define EC_ROCE_ROCE_GEN_EN_1_EN_HEADER_SPLIT (1 << 6)
+/* Enable direct data placement */
+#define EC_ROCE_ROCE_GEN_EN_1_EN_DIRECT_DATA_PLACEMENT (1 << 7)
+/* Enable EOP data trimming (ICRC) */
+#define EC_ROCE_ROCE_GEN_EN_1_EN_EOP_DATA_TRIM (1 << 8)
+/* Enable the RR FIFO */
+#define EC_ROCE_ROCE_GEN_EN_1_RR_FIFO_EN (1 << 16)
+/* Load the size of the RR FIFO (the size of each QP FIFO) */
+#define EC_ROCE_ROCE_GEN_EN_1_RR_FIFO_SIZE_LOAD (1 << 17)
+
+/**** roce_hdr register ****/
+/* NXTHDR field of the GRH */
+#define EC_ROCE_ROCE_HDR_NXTHDR_MASK 0x000000FF
+#define EC_ROCE_ROCE_HDR_NXTHDR_SHIFT 0
+/* IPVER field of the GRH */
+#define EC_ROCE_ROCE_HDR_IPVER_MASK 0x00000F00
+#define EC_ROCE_ROCE_HDR_IPVER_SHIFT 8
+/* TVER field of the BTH */
+#define EC_ROCE_ROCE_HDR_TVER_MASK 0x0000F000
+#define EC_ROCE_ROCE_HDR_TVER_SHIFT 12
+
+/**** qp_val register ****/
+/* DATA for comparison */
+#define EC_ROCE_QP_DB_QP_VAL_DATA_MASK 0x00FFFFFF
+#define EC_ROCE_QP_DB_QP_VAL_DATA_SHIFT 0
+/* Entry is valid */
+#define EC_ROCE_QP_DB_QP_VAL_VALID (1 << 31)
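
All of these fields follow one convention: a *_MASK that isolates the bits and a *_SHIFT that right-aligns them. A minimal read-modify-write sketch using the roce_hdr fields above (al_reg_read32()/al_reg_write32() are the HAL accessors used later in this patch; the helper, the roce_hdr_reg pointer, and the example values are illustrative only):

    /* illustrative helper, not part of the patch */
    static inline uint32_t ec_field_set(uint32_t reg, uint32_t mask,
                                        uint32_t shift, uint32_t val)
    {
            return (reg & ~mask) | ((val << shift) & mask);
    }

    uint32_t v = al_reg_read32(roce_hdr_reg); /* hypothetical mapped reg */

    /* e.g. GRH NxtHdr 0x1b (BTH follows) and GRH IPVER 6; example
     * values, not mandated by this header */
    v = ec_field_set(v, EC_ROCE_ROCE_HDR_NXTHDR_MASK,
                     EC_ROCE_ROCE_HDR_NXTHDR_SHIFT, 0x1b);
    v = ec_field_set(v, EC_ROCE_ROCE_HDR_IPVER_MASK,
                     EC_ROCE_ROCE_HDR_IPVER_SHIFT, 6);
    al_reg_write32(roce_hdr_reg, v);
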
+
+/**** roce_qp_cfg_1 register ****/
+/* QP service type:[3] - Ignore service type[2:0] - Service type ... */
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_1_SERVICE_TYPE_MASK 0x0000000F
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_1_SERVICE_TYPE_SHIFT 0
+/* Enable RR address translation for this QP */
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_1_EN_RR_TRANSLATION (1 << 4)
+/* Enable WR address translation for this QP */
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_1_EN_WR_TRANSLATION (1 << 5)
+/* UDMA value */
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_1_UDMA_MASK 0x000F0000
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_1_UDMA_SHIFT 16
+/* Queue value */
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_1_QUEUE_MASK 0x00300000
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_1_QUEUE_SHIFT 20
+/* UDMA selector */
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_1_UDMA_SEL_MASK 0x03000000
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_1_UDMA_SEL_SHIFT 24
+/* Queue selector */
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_1_QUEUE_SEL_MASK 0x0C000000
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_1_QUEUE_SEL_SHIFT 26
+/* MTU shift for read request segment count calculation */
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_1_MTU_SHIFT_MASK 0xF0000000
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_1_MTU_SHIFT_SHIFT 28
+
+/**** roce_qp_cfg_2 register ****/
+/* Ignore Syndrome check result */
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_2_IGNORE_SYNDROME (1 << 0)
+/* Ignore RKEY check result */
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_2_IGNORE_RKEY (1 << 1)
+/* Ignore PKEY check result */
+#define EC_ROCE_QP_DB_ROCE_QP_CFG_2_IGNORE_PKEY (1 << 2)
+
+/**** rr_ctrl register ****/
+/* Activate the READ RESPONSE state machine */
+#define EC_ROCE_QP_DB_RR_CTRL_ACTIVATE (1 << 0)
+/* Force the READ RESPONSE state machine to change from ACTIVE s ... */
+#define EC_ROCE_QP_DB_RR_CTRL_DEACTIVATE_FROM_ACTIVE (1 << 4)
+/* Force the READ RESPONSE state machine to change from MESSAGE ... */
+#define EC_ROCE_QP_DB_RR_CTRL_DEACTIVATE_FROM_MESSAGE (1 << 8)
+/* Enable READ RESPONSE acceleration */
+#define EC_ROCE_QP_DB_RR_CTRL_ENABLE (1 << 16)
+/* Enable READ RESPONSE direct data placement */
+#define EC_ROCE_QP_DB_RR_CTRL_ENABLE_DDP (1 << 17)
+/* Enable READ RESPONSE direct UDMA override */
+#define EC_ROCE_QP_DB_RR_CTRL_ENABLE_UDMA_OR (1 << 18)
+/* Enable READ RESPONSE direct Queue override */
+#define EC_ROCE_QP_DB_RR_CTRL_ENABLE_QUEUE_OR (1 << 19)
+
+/**** wr_ctrl register ****/
+/* Activate the WRITE REQUEST state machine */
+#define EC_ROCE_QP_DB_WR_CTRL_ACTIVATE (1 << 0)
+/* Enable the transition from IDLE to SYNC */
+#define EC_ROCE_QP_DB_WR_CTRL_SYNC (1 << 1)
+/* Force the WRITE REQUEST state machine to change from ACTIVE s ... */
+#define EC_ROCE_QP_DB_WR_CTRL_DEACTIVATE_FROM_ACTIVE (1 << 4)
+/* Force the WRITE REQUEST state machine to change from MESSAGE ... */
+#define EC_ROCE_QP_DB_WR_CTRL_DEACTIVATE_FROM_MESSAGE (1 << 8)
+/* Force the WRITE REQUEST state machine to change from SYNC sta ... */
+#define EC_ROCE_QP_DB_WR_CTRL_DEACTIVATE_FROM_SYNC (1 << 12)
+/* Enable WRITE REQUEST acceleration */
+#define EC_ROCE_QP_DB_WR_CTRL_ENABLE (1 << 16)
+/* Enable WRITE REQUEST direct data placement */
+#define EC_ROCE_QP_DB_WR_CTRL_ENABLE_DDP (1 << 17)
+/* Enable WRITE REQUEST direct UDMA override */
+#define EC_ROCE_QP_DB_WR_CTRL_ENABLE_UDMA_OR (1 << 18)
+/* Enable WRITE REQUEST direct Queue override */
+#define EC_ROCE_QP_DB_WR_CTRL_ENABLE_QUEUE_OR (1 << 19)
+
+/**** wr_psn register ****/
+/* PSN value
+(write only when the QP WR is in IDLE state) */
+#define EC_ROCE_QP_DB_WR_PSN_VAL_MASK 0x00FFFFFF
+#define EC_ROCE_QP_DB_WR_PSN_VAL_SHIFT 0
+
+/**** wr_psn_last_sw register ****/
+/* PSN value */
+#define EC_ROCE_QP_DB_WR_PSN_LAST_SW_VAL_MASK 0x00FFFFFF
+#define EC_ROCE_QP_DB_WR_PSN_LAST_SW_VAL_SHIFT 0
+
+/**** wr_psn_last_hw register ****/
+/* PSN value */
+#define EC_ROCE_QP_DB_WR_PSN_LAST_HW_VAL_MASK 0x00FFFFFF
+#define EC_ROCE_QP_DB_WR_PSN_LAST_HW_VAL_SHIFT 0
+
+/**** wr_psn_hw register ****/
+/* PSN value */
+#define EC_ROCE_QP_DB_WR_PSN_HW_VAL_MASK 0x00FFFFFF
+#define EC_ROCE_QP_DB_WR_PSN_HW_VAL_SHIFT 0
+
+/**** p_key register ****/
+/* Value */
+#define EC_ROCE_QP_DB_P_KEY_VAL_MASK 0x0000FFFF
+#define EC_ROCE_QP_DB_P_KEY_VAL_SHIFT 0
+
+/**** roce_status register ****/
+/* READ RESPONSE state */
+#define EC_ROCE_QP_DB_ROCE_STATUS_RR_STATE_MASK 0x00000007
+#define EC_ROCE_QP_DB_ROCE_STATUS_RR_STATE_SHIFT 0
+/* WRITE REQUEST state */
+#define EC_ROCE_QP_DB_ROCE_STATUS_WR_STATE_MASK 0x000000F0
+#define EC_ROCE_QP_DB_ROCE_STATUS_WR_STATE_SHIFT 4
+
+/**** rr_fifo_3 register ****/
+/* Expected READ RESPONSE PSN */
+#define EC_ROCE_QP_DB_RR_FIFO_3_RR_PSN_MASK 0x00FFFFFF
+#define EC_ROCE_QP_DB_RR_FIFO_3_RR_PSN_SHIFT 0
+/* Address requires translation */
+#define EC_ROCE_QP_DB_RR_FIFO_3_NEED_TRANSLATION (1 << 31)
+
+/**** rr_fifo_4 register ****/
+/* Number of segments for this READ REQUEST */
+#define EC_ROCE_QP_DB_RR_FIFO_4_NUM_OF_SEG_MASK 0x00FFFFFF
+#define EC_ROCE_QP_DB_RR_FIFO_4_NUM_OF_SEG_SHIFT 0
+
+/**** rr_fifo_status register ****/
+/* FIFO status - used */
+#define EC_ROCE_QP_DB_RR_FIFO_STATUS_USED_MASK 0x0000007F
+#define EC_ROCE_QP_DB_RR_FIFO_STATUS_USED_SHIFT 0
+/* FIFO status - empty */
+#define EC_ROCE_QP_DB_RR_FIFO_STATUS_EMPTY (1 << 16)
+/* FIFO status - full */
+#define EC_ROCE_QP_DB_RR_FIFO_STATUS_FULL (1 << 20)
+
+/**** rr_fifo_ctrl register ****/
+/* FIFO reset */
+#define EC_ROCE_QP_DB_RR_FIFO_CTRL_RESET (1 << 0)
+/* FIFO size */
+#define EC_ROCE_QP_DB_RR_FIFO_CTRL_SIZE_MASK 0x00007F00
+#define EC_ROCE_QP_DB_RR_FIFO_CTRL_SIZE_SHIFT 8
+
+/**** bypass register ****/
+/* Bypass outstanding module */
+#define EC_GEN_V3_BYPASS_OUTSTANDING_CTRL (1 << 0)
+/* Bypass Tx inline crypto module */
+#define EC_GEN_V3_BYPASS_TX_CRYPTO (1 << 1)
+/* Bypass Rx inline crypto module */
+#define EC_GEN_V3_BYPASS_RX_CRYPTO (1 << 2)
+/* Bypass Tx XTS read */
+#define EC_GEN_V3_BYPASS_TX_XTS_READ (1 << 3)
+/* Bypass rdma_sop_ready */
+#define EC_GEN_V3_BYPASS_RDMA (1 << 4)
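
A QP doorbell entry is matched by qp_val and steered by roce_qp_cfg_1. A hedged sketch of programming one entry (qp_db and its member names stand in for the doorbell entry struct, which this excerpt does not show; qpn, udma_num, and queue are caller-supplied values):

    uint32_t qp_val = 0;
    uint32_t cfg1 = 0;

    /* match the 24-bit QPN and mark the entry valid */
    qp_val |= (qpn << EC_ROCE_QP_DB_QP_VAL_DATA_SHIFT) &
              EC_ROCE_QP_DB_QP_VAL_DATA_MASK;
    qp_val |= EC_ROCE_QP_DB_QP_VAL_VALID;

    /* steer matching packets to a fixed UDMA/queue */
    cfg1 |= (udma_num << EC_ROCE_QP_DB_ROCE_QP_CFG_1_UDMA_SHIFT) &
            EC_ROCE_QP_DB_ROCE_QP_CFG_1_UDMA_MASK;
    cfg1 |= (queue << EC_ROCE_QP_DB_ROCE_QP_CFG_1_QUEUE_SHIFT) &
            EC_ROCE_QP_DB_ROCE_QP_CFG_1_QUEUE_MASK;

    /* configure first, then make the match entry valid */
    al_reg_write32(&qp_db->roce_qp_cfg_1, cfg1);
    al_reg_write32(&qp_db->qp_val, qp_val);
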
+
+/**** rx_comp_desc register ****/
+/* Selection for word0[13]:0- legacy SR-A01- per generic protoco ... */
+#define EC_GEN_V3_RX_COMP_DESC_W0_L3_CKS_RES_SEL (1 << 0)
+/* Selection for word0[14]:0- legacy SR-A01- per generic protoco ... */
+#define EC_GEN_V3_RX_COMP_DESC_W0_L4_CKS_RES_SEL (1 << 1)
+/* Selection for word3[2:0]:0- priority (legacy SR-A0)1- rdma_in ... */
+#define EC_GEN_V3_RX_COMP_DESC_W3_PRIORITY_RDMA_INFO_2_0_SEL (1 << 2)
+/* Selection for word3[10:3]:0- l4_offset / lro value (legacy SR ... */
+#define EC_GEN_V3_RX_COMP_DESC_W3_L4_OFFSET_RDMA_INFO_10_3_SEL_MASK 0x00000018
+#define EC_GEN_V3_RX_COMP_DESC_W3_L4_OFFSET_RDMA_INFO_10_3_SEL_SHIFT 3
+/* Selection for word3[12:11]:0-lro_context_result (legacy SR-A0 ... */
+#define EC_GEN_V3_RX_COMP_DESC_W3_LRO_RES_RDMA_INFO_12_11_SEL (1 << 5)
+/* Selection for word3[15:13]:0-switch source port (legacy SR-A0 ... */
+#define EC_GEN_V3_RX_COMP_DESC_W3_SPORT_RDMA_INFO_15_13_SEL (1 << 6)
+/* Selection for word3[28:16]:0-macsec decryption status[12:0] ... */
+#define EC_GEN_V3_RX_COMP_DESC_W3_DEC_STAT_12_0_RDMA_INFO_28_16_SEL (1 << 7)
+/* Selection for word3[29]:0-macsec decryption status[13] (legac ... */
+#define EC_GEN_V3_RX_COMP_DESC_W3_DEC_STAT_13_L4_CKS_RES_SEL (1 << 8)
+/* Selection for word3[30]:0-macsec decryption status[14] (legac ... */
+#define EC_GEN_V3_RX_COMP_DESC_W3_DEC_STAT_14_L3_CKS_RES_SEL (1 << 9)
+/* Selection for word3[31]:0-macsec decryption status[15] (legac ... */
+#define EC_GEN_V3_RX_COMP_DESC_W3_DEC_STAT_15_CRC_RES_SEL (1 << 10)
+/* Selection for words 4-70- word 4 time stamp, word 5-7 reserve ... */
+#define EC_GEN_V3_RX_COMP_DESC_W4_DDP_DEBUG_SEL (1 << 11)
+
+/**** tx_gpd_cam_addr register ****/
+/* Cam compare table address */
+#define EC_TFW_V3_TX_GPD_CAM_ADDR_VAL_MASK 0x0000001F
+#define EC_TFW_V3_TX_GPD_CAM_ADDR_VAL_SHIFT 0
+/* cam entry is valid */
+#define EC_TFW_V3_TX_GPD_CAM_CTRL_VALID (1 << 31)
+
+/**** tx_gcp_legacy register ****/
+/* 0-choose parameters from table1- choose legacy crce roce para ... */
+#define EC_TFW_V3_TX_GCP_LEGACY_PARAM_SEL (1 << 0)
+
+/**** tx_gcp_table_addr register ****/
+/* parameters table address */
+#define EC_TFW_V3_TX_GCP_TABLE_ADDR_VAL_MASK 0x0000001F
+#define EC_TFW_V3_TX_GCP_TABLE_ADDR_VAL_SHIFT 0
+
+/**** tx_gcp_table_gen register ****/
+/* polynomial select
+0-crc32(0x104C11DB7)
+1-crc32c(0x11EDC6F41) */
+#define EC_TFW_V3_TX_GCP_TABLE_GEN_POLY_SEL (1 << 0)
+/* Enable bit complement on crc result */
+#define EC_TFW_V3_TX_GCP_TABLE_GEN_CRC32_BIT_COMP (1 << 1)
+/* Enable bit swap on crc result */
+#define EC_TFW_V3_TX_GCP_TABLE_GEN_CRC32_BIT_SWAP (1 << 2)
+/* Enable byte swap on crc result */
+#define EC_TFW_V3_TX_GCP_TABLE_GEN_CRC32_BYTE_SWAP (1 << 3)
+/* Enable bit swap on input data */
+#define EC_TFW_V3_TX_GCP_TABLE_GEN_DATA_BIT_SWAP (1 << 4)
+/* Enable byte swap on input data */
+#define EC_TFW_V3_TX_GCP_TABLE_GEN_DATA_BYTE_SWAP (1 << 5)
+/* Number of bytes in trailer which are not part of crc calculat ... */
+#define EC_TFW_V3_TX_GCP_TABLE_GEN_TRAIL_SIZE_MASK 0x000003C0
+#define EC_TFW_V3_TX_GCP_TABLE_GEN_TRAIL_SIZE_SHIFT 6
+/* Number of bytes in header which are not part of crc calculati ... */
+#define EC_TFW_V3_TX_GCP_TABLE_GEN_HEAD_SIZE_MASK 0x00FF0000
+#define EC_TFW_V3_TX_GCP_TABLE_GEN_HEAD_SIZE_SHIFT 16
+/* corrected offset calculation0- subtract head_size (roce)1- ad ... */
+#define EC_TFW_V3_TX_GCP_TABLE_GEN_HEAD_CALC (1 << 24)
+/* 0-replace masked bits with 01-replace masked bits with 1 (roc ... */
+#define EC_TFW_V3_TX_GCP_TABLE_GEN_MASK_POLARITY (1 << 25)
+
+/**** tx_gcp_table_res register ****/
+/* Not in use */
+#define EC_TFW_V3_TX_GCP_TABLE_RES_SEL_MASK 0x0000001F
+#define EC_TFW_V3_TX_GCP_TABLE_RES_SEL_SHIFT 0
+/* Not in use */
+#define EC_TFW_V3_TX_GCP_TABLE_RES_EN (1 << 5)
+/* Not in use */
+#define EC_TFW_V3_TX_GCP_TABLE_RES_DEF (1 << 6)
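
Each generic CRC profile is programmed indirectly: software writes the entry index to tx_gcp_table_addr and then the profile word to tx_gcp_table_gen. A hedged sketch of composing one profile word (the entry index, the 8/4-byte head/trail sizes, and the register pointers are illustrative only; the register struct layout is not part of this excerpt):

    uint32_t gen = 0;

    gen |= EC_TFW_V3_TX_GCP_TABLE_GEN_POLY_SEL;        /* CRC32C polynomial */
    gen |= EC_TFW_V3_TX_GCP_TABLE_GEN_CRC32_BIT_COMP;  /* complement result */
    /* example: exclude an 8-byte header and 4-byte trailer from the CRC */
    gen |= (8 << EC_TFW_V3_TX_GCP_TABLE_GEN_HEAD_SIZE_SHIFT) &
           EC_TFW_V3_TX_GCP_TABLE_GEN_HEAD_SIZE_MASK;
    gen |= (4 << EC_TFW_V3_TX_GCP_TABLE_GEN_TRAIL_SIZE_SHIFT) &
           EC_TFW_V3_TX_GCP_TABLE_GEN_TRAIL_SIZE_MASK;

    al_reg_write32(tx_gcp_table_addr, entry); /* select the table entry */
    al_reg_write32(tx_gcp_table_gen, gen);    /* then write the profile */
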
+
+/**** tx_gcp_table_alu_opcode register ****/
+/* first opcode
+e.g. (A op1 B) op3 (C op2 D) */
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_OPCODE_OPCODE_1_MASK 0x0000003F
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_OPCODE_OPCODE_1_SHIFT 0
+/* second opcode
+e.g. (A op1 B) op3 (C op2 D) */
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_OPCODE_OPCODE_2_MASK 0x00000FC0
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_OPCODE_OPCODE_2_SHIFT 6
+/* third opcode
+e.g. (A op1 B) op3 (C op2 D) */
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_OPCODE_OPCODE_3_MASK 0x0003F000
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_OPCODE_OPCODE_3_SHIFT 12
+
+/**** tx_gcp_table_alu_opsel register ****/
+/* first opsel, input selection */
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_OPSEL_OPSEL_1_MASK 0x0000000F
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_OPSEL_OPSEL_1_SHIFT 0
+/* second opsel, input selection */
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_OPSEL_OPSEL_2_MASK 0x000000F0
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_OPSEL_OPSEL_2_SHIFT 4
+/* third opsel, input selection */
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_OPSEL_OPSEL_3_MASK 0x00000F00
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_OPSEL_OPSEL_3_SHIFT 8
+/* fourth opsel, input selection */
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_OPSEL_OPSEL_4_MASK 0x0000F000
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_OPSEL_OPSEL_4_SHIFT 12
+
+/**** tx_gcp_table_alu_val register ****/
+/* value for alu input */
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_VAL_VAL_MASK 0x000001FF
+#define EC_TFW_V3_TX_GCP_TABLE_ALU_VAL_VAL_SHIFT 0
+
+/**** crc_csum_replace register ****/
+/* 0- use table
+1- legacy SR-A0 */
+#define EC_TFW_V3_CRC_CSUM_REPLACE_L3_CSUM_LEGACY_SEL (1 << 0)
+/* 0- use table
+1- legacy SR-A0 */
+#define EC_TFW_V3_CRC_CSUM_REPLACE_L4_CSUM_LEGACY_SEL (1 << 1)
+/* 0- use table
+1- legacy SR-A0 */
+#define EC_TFW_V3_CRC_CSUM_REPLACE_CRC_LEGACY_SEL (1 << 2)
+
+/**** crc_csum_replace_table_addr register ****/
+/* parameters table address */
+#define EC_TFW_V3_CRC_CSUM_REPLACE_TABLE_ADDR_VAL_MASK 0x0000007F
+#define EC_TFW_V3_CRC_CSUM_REPLACE_TABLE_ADDR_VAL_SHIFT 0
+
+/**** crc_csum_replace_table register ****/
+/* L3 Checksum replace enable */
+#define EC_TFW_V3_CRC_CSUM_REPLACE_TABLE_L3_CSUM_EN (1 << 0)
+/* L4 Checksum replace enable */
+#define EC_TFW_V3_CRC_CSUM_REPLACE_TABLE_L4_CSUM_EN (1 << 1)
+/* CRC replace enable */
+#define EC_TFW_V3_CRC_CSUM_REPLACE_TABLE_CRC_EN (1 << 2)
+
+/**** rx_gpd_cam_addr register ****/
+/* Cam compare table address */
+#define EC_RFW_V3_RX_GPD_CAM_ADDR_VAL_MASK 0x0000001F
+#define EC_RFW_V3_RX_GPD_CAM_ADDR_VAL_SHIFT 0
+/* cam entry is valid */
+#define EC_RFW_V3_RX_GPD_CAM_CTRL_VALID (1 << 31)
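
The crc_csum_replace table above, like the other tables in this block, is written indirectly: the entry index goes to the *_table_addr register and the entry contents to the *_table register. A hedged sketch (the register pointers and idx are hypothetical; only the bit definitions come from this header):

    /* entry for one protocol index: replace L3 and L4 checksums, not CRC */
    al_reg_write32(crc_csum_replace_table_addr,
                   idx & EC_TFW_V3_CRC_CSUM_REPLACE_TABLE_ADDR_VAL_MASK);
    al_reg_write32(crc_csum_replace_table,
                   EC_TFW_V3_CRC_CSUM_REPLACE_TABLE_L3_CSUM_EN |
                   EC_TFW_V3_CRC_CSUM_REPLACE_TABLE_L4_CSUM_EN);
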
+
+/**** gpd_p1 register ****/
+/* Location in bytes of the gpd cam data1 in the parser result v ... */
+#define EC_RFW_V3_GPD_P1_OFFSET_MASK 0x000003FF
+#define EC_RFW_V3_GPD_P1_OFFSET_SHIFT 0
+
+/**** gpd_p2 register ****/
+/* Location in bytes of the gpd cam data2 in the parser result v ... */
+#define EC_RFW_V3_GPD_P2_OFFSET_MASK 0x000003FF
+#define EC_RFW_V3_GPD_P2_OFFSET_SHIFT 0
+
+/**** gpd_p3 register ****/
+/* Location in bytes of the gpd cam data3 in the parser result v ... */
+#define EC_RFW_V3_GPD_P3_OFFSET_MASK 0x000003FF
+#define EC_RFW_V3_GPD_P3_OFFSET_SHIFT 0
+
+/**** gpd_p4 register ****/
+/* Location in bytes of the gpd cam data4 in the parser result v ... */
+#define EC_RFW_V3_GPD_P4_OFFSET_MASK 0x000003FF
+#define EC_RFW_V3_GPD_P4_OFFSET_SHIFT 0
+
+/**** gpd_p5 register ****/
+/* Location in bytes of the gpd cam data5 in the parser result v ... */
+#define EC_RFW_V3_GPD_P5_OFFSET_MASK 0x000003FF
+#define EC_RFW_V3_GPD_P5_OFFSET_SHIFT 0
+
+/**** gpd_p6 register ****/
+/* Location in bytes of the gpd cam data6 in the parser result v ... */
+#define EC_RFW_V3_GPD_P6_OFFSET_MASK 0x000003FF
+#define EC_RFW_V3_GPD_P6_OFFSET_SHIFT 0
+
+/**** gpd_p7 register ****/
+/* Location in bytes of the gpd cam data7 in the parser result v ... */
+#define EC_RFW_V3_GPD_P7_OFFSET_MASK 0x000003FF
+#define EC_RFW_V3_GPD_P7_OFFSET_SHIFT 0
+
+/**** gpd_p8 register ****/
+/* Location in bytes of the gpd cam data8 in the parser result v ... */
+#define EC_RFW_V3_GPD_P8_OFFSET_MASK 0x000003FF
+#define EC_RFW_V3_GPD_P8_OFFSET_SHIFT 0
+
+/**** rx_gcp_legacy register ****/
+/* 0-choose parameters from table1- choose legacy crce roce para ... */
+#define EC_RFW_V3_RX_GCP_LEGACY_PARAM_SEL (1 << 0)
+
+/**** rx_gcp_table_addr register ****/
+/* parameters table address */
+#define EC_RFW_V3_RX_GCP_TABLE_ADDR_VAL_MASK 0x0000001F
+#define EC_RFW_V3_RX_GCP_TABLE_ADDR_VAL_SHIFT 0
+
+/**** rx_gcp_table_gen register ****/
+/* polynomial select
+0-crc32(0x104C11DB7)
+1-crc32c(0x11EDC6F41) */
+#define EC_RFW_V3_RX_GCP_TABLE_GEN_POLY_SEL (1 << 0)
+/* Enable bit complement on crc result */
+#define EC_RFW_V3_RX_GCP_TABLE_GEN_CRC32_BIT_COMP (1 << 1)
+/* Enable bit swap on crc result */
+#define EC_RFW_V3_RX_GCP_TABLE_GEN_CRC32_BIT_SWAP (1 << 2)
+/* Enable byte swap on crc result */
+#define EC_RFW_V3_RX_GCP_TABLE_GEN_CRC32_BYTE_SWAP (1 << 3)
+/* Enable bit swap on input data */
+#define EC_RFW_V3_RX_GCP_TABLE_GEN_DATA_BIT_SWAP (1 << 4)
+/* Enable byte swap on input data */
+#define EC_RFW_V3_RX_GCP_TABLE_GEN_DATA_BYTE_SWAP (1 << 5)
+/* Number of bytes in trailer which are not part of crc calculat ... */
+#define EC_RFW_V3_RX_GCP_TABLE_GEN_TRAIL_SIZE_MASK 0x000003C0
+#define EC_RFW_V3_RX_GCP_TABLE_GEN_TRAIL_SIZE_SHIFT 6
+/* Number of bytes in header which are not part of crc calculati ... */
+#define EC_RFW_V3_RX_GCP_TABLE_GEN_HEAD_SIZE_MASK 0x00FF0000
+#define EC_RFW_V3_RX_GCP_TABLE_GEN_HEAD_SIZE_SHIFT 16
+/* corrected offset calculation0- subtract head_size (roce)1- ad ... */
+#define EC_RFW_V3_RX_GCP_TABLE_GEN_HEAD_CALC (1 << 24)
+/* 0-replace masked bits with 01-replace masked bits with 1 (roc ... */
+#define EC_RFW_V3_RX_GCP_TABLE_GEN_MASK_POLARITY (1 << 25)
+
+/**** rx_gcp_table_res register ****/
+/* Bit mask for crc/checksum result options for metadata W0[13][ ... */
+#define EC_RFW_V3_RX_GCP_TABLE_RES_SEL_0_MASK 0x0000001F
+#define EC_RFW_V3_RX_GCP_TABLE_RES_SEL_0_SHIFT 0
+/* Bit mask for crc/checksum result options for metadata W0[14][ ... */
+#define EC_RFW_V3_RX_GCP_TABLE_RES_SEL_1_MASK 0x000003E0
+#define EC_RFW_V3_RX_GCP_TABLE_RES_SEL_1_SHIFT 5
+/* Bit mask for crc/checksum result options for metadata W3[29][ ... */
+#define EC_RFW_V3_RX_GCP_TABLE_RES_SEL_2_MASK 0x00007C00
+#define EC_RFW_V3_RX_GCP_TABLE_RES_SEL_2_SHIFT 10
+/* Bit mask for crc/checksum result options for metadata W3[30][ ... */
+#define EC_RFW_V3_RX_GCP_TABLE_RES_SEL_3_MASK 0x000F8000
+#define EC_RFW_V3_RX_GCP_TABLE_RES_SEL_3_SHIFT 15
+/* Bit mask for crc/checksum result options for metadata W3[31][ ... */
+#define EC_RFW_V3_RX_GCP_TABLE_RES_SEL_4_MASK 0x01F00000
+#define EC_RFW_V3_RX_GCP_TABLE_RES_SEL_4_SHIFT 20
+/* enable crc result check */
+#define EC_RFW_V3_RX_GCP_TABLE_RES_EN (1 << 25)
+/* default value for crc check for non-crc protocol */
+#define EC_RFW_V3_RX_GCP_TABLE_RES_DEF (1 << 26)
+
+/**** rx_gcp_table_alu_opcode register ****/
+/* first opcode
+e.g. (A op1 B) op3 (C op2 D) */
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_OPCODE_OPCODE_1_MASK 0x0000003F
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_OPCODE_OPCODE_1_SHIFT 0
+/* second opcode
+e.g. (A op1 B) op3 (C op2 D) */
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_OPCODE_OPCODE_2_MASK 0x00000FC0
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_OPCODE_OPCODE_2_SHIFT 6
+/* third opcode
+e.g. (A op1 B) op3 (C op2 D) */
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_OPCODE_OPCODE_3_MASK 0x0003F000
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_OPCODE_OPCODE_3_SHIFT 12
+
+/**** rx_gcp_table_alu_opsel register ****/
+/* first opsel, input selection */
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_OPSEL_OPSEL_1_MASK 0x0000000F
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_OPSEL_OPSEL_1_SHIFT 0
+/* second opsel, input selection */
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_OPSEL_OPSEL_2_MASK 0x000000F0
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_OPSEL_OPSEL_2_SHIFT 4
+/* third opsel, input selection */
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_OPSEL_OPSEL_3_MASK 0x00000F00
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_OPSEL_OPSEL_3_SHIFT 8
+/* fourth opsel, input selection */
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_OPSEL_OPSEL_4_MASK 0x0000F000
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_OPSEL_OPSEL_4_SHIFT 12
+
+/**** rx_gcp_table_alu_val register ****/
+/* value for alu input */
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_VAL_VAL_MASK 0x000001FF
+#define EC_RFW_V3_RX_GCP_TABLE_ALU_VAL_VAL_SHIFT 0
+
+/**** rx_gcp_alu_p1 register ****/
+/* Location in bytes of field 1 in the parser result vector */
+#define EC_RFW_V3_RX_GCP_ALU_P1_OFFSET_MASK 0x000003FF
+#define EC_RFW_V3_RX_GCP_ALU_P1_OFFSET_SHIFT 0
+/* Right shift for field 1 in the parser result vector */
+#define EC_RFW_V3_RX_GCP_ALU_P1_SHIFT_MASK 0x000F0000
+#define EC_RFW_V3_RX_GCP_ALU_P1_SHIFT_SHIFT 16
+
+/**** rx_gcp_alu_p2 register ****/
+/* Location in bytes of field 2 in the parser result vector */
+#define EC_RFW_V3_RX_GCP_ALU_P2_OFFSET_MASK 0x000003FF
+#define EC_RFW_V3_RX_GCP_ALU_P2_OFFSET_SHIFT 0
+/* Right shift for field 2 in the parser result vector */
+#define EC_RFW_V3_RX_GCP_ALU_P2_SHIFT_MASK 0x000F0000
+#define EC_RFW_V3_RX_GCP_ALU_P2_SHIFT_SHIFT 16
+
+/**** hs_ctrl_table_addr register ****/
+/* Header split control table address */
+#define EC_RFW_V3_HS_CTRL_TABLE_ADDR_VAL_MASK 0x000000FF
+#define EC_RFW_V3_HS_CTRL_TABLE_ADDR_VAL_SHIFT 0
+
+/**** hs_ctrl_table register ****/
+/* Header split length select 0 - legacy1 - rdma2 - alu3 - reser ... */
+#define EC_RFW_V3_HS_CTRL_TABLE_SEL_MASK 0x00000003
+#define EC_RFW_V3_HS_CTRL_TABLE_SEL_SHIFT 0
+/* enable header split */
+#define EC_RFW_V3_HS_CTRL_TABLE_ENABLE (1 << 2)
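
The ALU tables here and in the GCP blocks above all share one scheme: an entry computes (A op1 B) op3 (C op2 D), where the four opsel fields choose the operands A, B, C, D and the three opcode fields choose the operators. A hedged sketch of packing the opcode word (the numeric opcode encodings are not listed in this excerpt, so ALU_OP_ADD is a placeholder):

    #define ALU_OP_ADD 1 /* placeholder encoding, an assumption */

    uint32_t opcode = 0;

    /* op1 = op2 = op3 = add, i.e. result = (A + B) + (C + D) */
    opcode |= (ALU_OP_ADD << EC_RFW_V3_RX_GCP_TABLE_ALU_OPCODE_OPCODE_1_SHIFT) &
              EC_RFW_V3_RX_GCP_TABLE_ALU_OPCODE_OPCODE_1_MASK;
    opcode |= (ALU_OP_ADD << EC_RFW_V3_RX_GCP_TABLE_ALU_OPCODE_OPCODE_2_SHIFT) &
              EC_RFW_V3_RX_GCP_TABLE_ALU_OPCODE_OPCODE_2_MASK;
    opcode |= (ALU_OP_ADD << EC_RFW_V3_RX_GCP_TABLE_ALU_OPCODE_OPCODE_3_SHIFT) &
              EC_RFW_V3_RX_GCP_TABLE_ALU_OPCODE_OPCODE_3_MASK;
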
+
+/**** hs_ctrl_table_alu_opcode register ****/
+/* first opcode
+e.g. (A op1 B) op3 (C op2 D) */
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_OPCODE_OPCODE_1_MASK 0x0000003F
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_OPCODE_OPCODE_1_SHIFT 0
+/* second opcode
+e.g. (A op1 B) op3 (C op2 D) */
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_OPCODE_OPCODE_2_MASK 0x00000FC0
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_OPCODE_OPCODE_2_SHIFT 6
+/* third opcode
+e.g. (A op1 B) op3 (C op2 D) */
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_OPCODE_OPCODE_3_MASK 0x0003F000
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_OPCODE_OPCODE_3_SHIFT 12
+
+/**** hs_ctrl_table_alu_opsel register ****/
+/* first opsel, input selection */
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_OPSEL_OPSEL_1_MASK 0x0000000F
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_OPSEL_OPSEL_1_SHIFT 0
+/* second opsel, input selection */
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_OPSEL_OPSEL_2_MASK 0x000000F0
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_OPSEL_OPSEL_2_SHIFT 4
+/* third opsel, input selection */
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_OPSEL_OPSEL_3_MASK 0x00000F00
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_OPSEL_OPSEL_3_SHIFT 8
+/* fourth opsel, input selection */
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_OPSEL_OPSEL_4_MASK 0x0000F000
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_OPSEL_OPSEL_4_SHIFT 12
+
+/**** hs_ctrl_table_alu_val register ****/
+/* value for alu input */
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_VAL_VAL_MASK 0x0000FFFF
+#define EC_RFW_V3_HS_CTRL_TABLE_ALU_VAL_VAL_SHIFT 0
+
+/**** hs_ctrl_cfg register ****/
+/* Header split enable static selection0 - legacy1 - header split ... */
+#define EC_RFW_V3_HS_CTRL_CFG_ENABLE_SEL (1 << 0)
+/* Header split length static selection0 - legacy1 - header split ... */
+#define EC_RFW_V3_HS_CTRL_CFG_LENGTH_SEL (1 << 1)
+
+/**** hs_ctrl_alu_p1 register ****/
+/* Location in bytes of field 1 in the parser result vector */
+#define EC_RFW_V3_HS_CTRL_ALU_P1_OFFSET_MASK 0x000003FF
+#define EC_RFW_V3_HS_CTRL_ALU_P1_OFFSET_SHIFT 0
+/* Right shift for field 1 in the parser result vector */
+#define EC_RFW_V3_HS_CTRL_ALU_P1_SHIFT_MASK 0x000F0000
+#define EC_RFW_V3_HS_CTRL_ALU_P1_SHIFT_SHIFT 16
+
+/**** hs_ctrl_alu_p2 register ****/
+/* Location in bytes of field 2 in the parser result vector */
+#define EC_RFW_V3_HS_CTRL_ALU_P2_OFFSET_MASK 0x000003FF
+#define EC_RFW_V3_HS_CTRL_ALU_P2_OFFSET_SHIFT 0
+/* Right shift for field 2 in the parser result vector */
+#define EC_RFW_V3_HS_CTRL_ALU_P2_SHIFT_MASK 0x000F0000
+#define EC_RFW_V3_HS_CTRL_ALU_P2_SHIFT_SHIFT 16
+
+/**** tx_config register ****/
+/* [0] pre increment word swap[1] pre increment byte swap[2] pre ... */
+#define EC_CRYPTO_TX_CONFIG_TWEAK_ENDIANITY_SWAP_MASK 0x0000003F
+#define EC_CRYPTO_TX_CONFIG_TWEAK_ENDIANITY_SWAP_SHIFT 0
+/* [0] pre encryption word swap[1] pre encryption byte swap[2] p ... */
+#define EC_CRYPTO_TX_CONFIG_DATA_ENDIANITY_SWAP_MASK 0x00003F00
+#define EC_CRYPTO_TX_CONFIG_DATA_ENDIANITY_SWAP_SHIFT 8
+/* Enabling pipeline optimization */
+#define EC_CRYPTO_TX_CONFIG_PIPE_CALC_EN (1 << 16)
+
+/**** rx_config register ****/
+/* [0] pre increment word swap[1] pre increment byte swap[2] pre ... */
+#define EC_CRYPTO_RX_CONFIG_TWEAK_ENDIANITY_SWAP_MASK 0x0000003F
+#define EC_CRYPTO_RX_CONFIG_TWEAK_ENDIANITY_SWAP_SHIFT 0
+/* [0] pre encryption word swap[1] pre encryption byte swap[2] p ... */
+#define EC_CRYPTO_RX_CONFIG_DATA_ENDIANITY_SWAP_MASK 0x00003F00
+#define EC_CRYPTO_RX_CONFIG_DATA_ENDIANITY_SWAP_SHIFT 8
+/* Enabling pipeline optimization */
+#define EC_CRYPTO_RX_CONFIG_PIPE_CALC_EN (1 << 16)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_EC_REG_H */
+
+/** @} end of
group */ + + diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_kr.c b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_kr.c new file mode 100644 index 00000000000000..40bacc03a2b142 --- /dev/null +++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_kr.c @@ -0,0 +1,699 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*******************************************************************************/ +/** + * Ethernet + * @{ + * @file al_hal_eth_kr.c + * + * @brief KR HAL driver for main functions (auto-neg, Link Training) + * + */ + +#include "al_hal_eth_kr.h" +#include "al_hal_eth_mac_regs.h" + +/* + * AN(Auto Negotiation) registers + * (read / write indirect with al_eth_an_reg_read/write) + */ +#define AL_ETH_KR_AN_CONTROL 0 +#define AL_ETH_KR_AN_CONTROL_RESTART AL_BIT(9) +#define AL_ETH_KR_AN_CONTROL_ENABLE AL_BIT(12) + +#define AL_ETH_KR_AN_STATUS 1 +#define AL_ETH_KR_AN_STATUS_COMPLETED AL_BIT(5) +#define AL_ETH_KR_AN_STATUS_BASE_PAGE_RECEIVED AL_BIT(6) +#define AL_ETH_KR_AN_STATUS_CHECK_MASK 0xFF8A +#define AL_ETH_KR_AN_STATUS_CHECK_NO_ERROR 0x0008 + +/* AN local advertising */ +#define AL_ETH_KR_AN_ADV0 16 +#define AL_ETH_KR_AN_ADV1 17 +#define AL_ETH_KR_AN_ADV2 18 +/* AN remote advertising */ +#define AL_ETH_KR_AN_REM_ADV0 19 +#define AL_ETH_KR_AN_REM_ADV1 20 +#define AL_ETH_KR_AN_REM_ADV2 21 +/* AN advertising registers parsing */ +/* register 1 */ +#define AL_ETH_KR_AN_ADV1_SEL_FIELD_MASK 0x001f +#define AL_ETH_KR_AN_ADV1_SEL_FIELD_SHIFT 0 +#define AL_ETH_KR_AN_ADV1_ECHOED_NONCE_MASK 0x03e0 +#define AL_ETH_KR_AN_ADV1_ECHOED_NONCE_SHIFT 5 +#define AL_ETH_KR_AN_ADV1_CAPABILITY_MASK 0x1c00 +#define AL_ETH_KR_AN_ADV1_CAPABILITY_SHIFT 10 +#define AL_ETH_KR_AN_ADV1_REM_FAULT_MASK 0x2000 +#define AL_ETH_KR_AN_ADV1_REM_FAULT_SHIFT 13 +#define AL_ETH_KR_AN_ADV1_ACK_MASK 0x4000 +#define AL_ETH_KR_AN_ADV1_ACK_SHIFT 14 +#define AL_ETH_KR_AN_ADV1_NEXT_PAGE_MASK 0x8000 +#define AL_ETH_KR_AN_ADV1_NEXT_PAGE_SHIFT 15 +/* register 2 */ +#define AL_ETH_KR_AN_ADV2_TX_NONCE_MASK 0x001f +#define AL_ETH_KR_AN_ADV2_TX_NONCE_SHIFT 0 +#define AL_ETH_KR_AN_ADV2_TECH_MASK 0xffe0 +#define AL_ETH_KR_AN_ADV2_TECH_SHIFT 5 +/* register 3 */ +/* TECH field in the third register is extended to the field in the second + * register and it is currently reserved (should be always 0) */ +#define AL_ETH_KR_AN_ADV3_TECH_MASK 0x1fff +#define AL_ETH_KR_AN_ADV3_TECH_SHIFT 0 +#define AL_ETH_KR_AN_ADV3_FEC_MASK 0xc000 +#define AL_ETH_KR_AN_ADV3_FEC_SHIFT 14 + +/* + * LT(Link Training) registers + * (read / write indirect with al_eth_pma_reg_read/write) + */ +#define AL_ETH_KR_PMD_CONTROL 150 +#define AL_ETH_KR_PMD_CONTROL_RESTART 0 +#define AL_ETH_KR_PMD_CONTROL_ENABLE 1 + +#define AL_ETH_KR_PMD_STATUS 151 +#define AL_ETH_KR_PMD_STATUS_RECEIVER_COMPLETED_SHIFT 0 +#define AL_ETH_KR_PMD_STATUS_RECEIVER_FRAME_LOCK_SHIFT 1 +#define AL_ETH_KR_PMD_STATUS_RECEIVER_START_UP_PROTO_PROG_SHIFT 2 +#define AL_ETH_KR_PMD_STATUS_FAILURE_SHIFT 3 + +#define AL_ETH_KR_PMD_LP_COEF_UP 152 +#define AL_ETH_KR_PMD_LP_COEF_UP_MINUS_MASK 0x0003 +#define AL_ETH_KR_PMD_LP_COEF_UP_MINUS_SHIFT 0 +#define AL_ETH_KR_PMD_LP_COEF_UP_ZERO_MASK 0x000C +#define AL_ETH_KR_PMD_LP_COEF_UP_ZERO_SHIFT 2 +#define AL_ETH_KR_PMD_LP_COEF_UP_PLUS_MASK 0x0030 +#define AL_ETH_KR_PMD_LP_COEF_UP_PLUS_SHIFT 4 +#define AL_ETH_KR_PMD_LP_COEF_UP_INITIALIZE_SHIFT 12 +#define AL_ETH_KR_PMD_LP_COEF_UP_PRESET_SHIFT 13 + +#define AL_ETH_KR_PMD_LP_STATUS_REPORT 153 +#define AL_ETH_KR_PMD_LP_STATUS_REPORT_MINUS_MASK 0x0003 +#define AL_ETH_KR_PMD_LP_STATUS_REPORT_MINUS_SHIFT 0 +#define AL_ETH_KR_PMD_LP_STATUS_REPORT_ZERO_MASK 0x000C +#define AL_ETH_KR_PMD_LP_STATUS_REPORT_ZERO_SHIFT 2 +#define AL_ETH_KR_PMD_LP_STATUS_REPORT_PLUS_MASK 0x0030 +#define AL_ETH_KR_PMD_LP_STATUS_REPORT_PLUS_SHIFT 4 +#define AL_ETH_KR_PMD_LP_STATUS_RECEIVER_READY_SHIFT 15 + +#define AL_ETH_KR_PMD_LD_COEF_UP 154 +#define 
AL_ETH_KR_PMD_LD_COEF_UP_MINUS_MASK 0x0003 +#define AL_ETH_KR_PMD_LD_COEF_UP_MINUS_SHIFT 0 +#define AL_ETH_KR_PMD_LD_COEF_UP_ZERO_MASK 0x000C +#define AL_ETH_KR_PMD_LD_COEF_UP_ZERO_SHIFT 2 +#define AL_ETH_KR_PMD_LD_COEF_UP_PLUS_MASK 0x0030 +#define AL_ETH_KR_PMD_LD_COEF_UP_PLUS_SHIFT 4 +#define AL_ETH_KR_PMD_LD_COEF_UP_INITIALIZE_SHIFT 12 +#define AL_ETH_KR_PMD_LD_COEF_UP_PRESET_SHIFT 13 + +#define AL_ETH_KR_PMD_LD_STATUS_REPORT 155 +#define AL_ETH_KR_PMD_LD_STATUS_REPORT_MINUS_MASK 0x0003 +#define AL_ETH_KR_PMD_LD_STATUS_REPORT_MINUS_SHIFT 0 +#define AL_ETH_KR_PMD_LD_STATUS_REPORT_ZERO_MASK 0x000C +#define AL_ETH_KR_PMD_LD_STATUS_REPORT_ZERO_SHIFT 2 +#define AL_ETH_KR_PMD_LD_STATUS_REPORT_PLUS_MASK 0x0030 +#define AL_ETH_KR_PMD_LD_STATUS_REPORT_PLUS_SHIFT 4 +#define AL_ETH_KR_PMD_LD_STATUS_REPORT_RECEIVER_READY_SHIFT 15 + +/** + * read pcs auto-negotiation registers (indirect access) + * + * @param adapter pointer to the private structure + * @param reg_addr address in the an registers + * + * @return the register value + */ +static uint16_t al_eth_an_reg_read( + struct al_hal_eth_adapter *adapter, + uint16_t reg_addr) +{ + uint32_t mmd_data; + + /* indirect access */ + al_reg_write32(&adapter->mac_regs_base->kr.an_addr, reg_addr); + mmd_data = al_reg_read32(&adapter->mac_regs_base->kr.an_data); + mmd_data &= 0xFFFF; + + al_dbg("[%s]: %s - reg %d. val 0x%x", + adapter->name, __func__, reg_addr, mmd_data); + + return (uint16_t)mmd_data; +} + +/** + * write pcs auto-negotiation registers (indirect access) + * + * @param adapter pointer to the private structure + * @param reg_addr address in the an registers + * @param reg_data value to write to the register + * + */ +static void al_eth_an_reg_write( + struct al_hal_eth_adapter *adapter, + uint16_t reg_addr, + uint16_t reg_data) +{ + uint32_t mmd_data = reg_data; + + /* indirect access */ + al_reg_write32(&adapter->mac_regs_base->kr.an_addr, reg_addr); + al_reg_write32(&adapter->mac_regs_base->kr.an_data, reg_data); + + al_dbg("[%s]: %s - reg %d. val 0x%x", + adapter->name, __func__, reg_addr, mmd_data); +} + +/** + * read pcs pma registers (indirect access) + * + * @param adapter pointer to the private structure + * @param reg_addr address in the pma registers + * + * @return the register value + */ +static uint16_t al_eth_pma_reg_read( + struct al_hal_eth_adapter *adapter, + uint16_t reg_addr) +{ + uint32_t mmd_data; + + /* indirect access */ + al_reg_write32(&adapter->mac_regs_base->kr.pma_addr, reg_addr); + mmd_data = al_reg_read32(&adapter->mac_regs_base->kr.pma_data); + mmd_data &= 0xFFFF; + + al_dbg("[%s]: %s - reg %d. val 0x%x", + adapter->name, __func__, reg_addr, mmd_data); + + return (uint16_t)mmd_data; +} + +/** + * write pcs pma registers (indirect access) + * + * @param adapter pointer to the private structure + * @param reg_addr address in the pma registers + * @param reg_data value to write to the register + * + */ +static void al_eth_pma_reg_write( + struct al_hal_eth_adapter *adapter, + uint16_t reg_addr, + uint16_t reg_data) +{ + uint32_t mmd_data = reg_data; + + /* indirect access */ + al_reg_write32(&adapter->mac_regs_base->kr.pma_addr, reg_addr); + al_reg_write32(&adapter->mac_regs_base->kr.pma_data, reg_data); + + al_dbg("[%s]: %s - reg %d. 
val 0x%x", + adapter->name, __func__, reg_addr, mmd_data); +} + +void al_eth_lp_coeff_up_get( + struct al_hal_eth_adapter *adapter, + struct al_eth_kr_coef_up_data *lpcoeff) +{ + uint16_t reg; + + reg = al_eth_pma_reg_read(adapter, AL_ETH_KR_PMD_LP_COEF_UP); + + lpcoeff->preset = + (AL_REG_BIT_GET( + reg, AL_ETH_KR_PMD_LP_COEF_UP_PRESET_SHIFT) != 0); + + lpcoeff->initialize = + (AL_REG_BIT_GET( + reg, AL_ETH_KR_PMD_LP_COEF_UP_INITIALIZE_SHIFT) != 0); + + lpcoeff->c_minus = AL_REG_FIELD_GET(reg, + AL_ETH_KR_PMD_LP_COEF_UP_MINUS_MASK, + AL_ETH_KR_PMD_LP_COEF_UP_MINUS_SHIFT); + + lpcoeff->c_zero = AL_REG_FIELD_GET(reg, + AL_ETH_KR_PMD_LP_COEF_UP_ZERO_MASK, + AL_ETH_KR_PMD_LP_COEF_UP_ZERO_SHIFT); + + lpcoeff->c_plus = AL_REG_FIELD_GET(reg, + AL_ETH_KR_PMD_LP_COEF_UP_PLUS_MASK, + AL_ETH_KR_PMD_LP_COEF_UP_PLUS_SHIFT); +} + +void al_eth_lp_status_report_get( + struct al_hal_eth_adapter *adapter, + struct al_eth_kr_status_report_data *status) +{ + uint16_t reg; + + reg = al_eth_pma_reg_read(adapter, + AL_ETH_KR_PMD_LP_STATUS_REPORT); + + status->c_minus = AL_REG_FIELD_GET(reg, + AL_ETH_KR_PMD_LP_STATUS_REPORT_MINUS_MASK, + AL_ETH_KR_PMD_LP_STATUS_REPORT_MINUS_SHIFT); + + status->c_zero = AL_REG_FIELD_GET(reg, + AL_ETH_KR_PMD_LP_STATUS_REPORT_ZERO_MASK, + AL_ETH_KR_PMD_LP_STATUS_REPORT_ZERO_SHIFT); + + status->c_plus = AL_REG_FIELD_GET(reg, + AL_ETH_KR_PMD_LP_STATUS_REPORT_PLUS_MASK, + AL_ETH_KR_PMD_LP_STATUS_REPORT_PLUS_SHIFT); + + status->receiver_ready = + (AL_REG_BIT_GET( + reg, AL_ETH_KR_PMD_LP_STATUS_RECEIVER_READY_SHIFT) != 0); + +} + +void al_eth_ld_coeff_up_set( + struct al_hal_eth_adapter *adapter, + struct al_eth_kr_coef_up_data *ldcoeff) +{ + uint16_t reg = 0; + + if (ldcoeff->preset) + AL_REG_BIT_SET(reg, AL_ETH_KR_PMD_LD_COEF_UP_PRESET_SHIFT); + + if (ldcoeff->initialize) + AL_REG_BIT_SET(reg, AL_ETH_KR_PMD_LD_COEF_UP_INITIALIZE_SHIFT); + + AL_REG_FIELD_SET(reg, + AL_ETH_KR_PMD_LD_COEF_UP_MINUS_MASK, + AL_ETH_KR_PMD_LD_COEF_UP_MINUS_SHIFT, + ldcoeff->c_minus); + + AL_REG_FIELD_SET(reg, + AL_ETH_KR_PMD_LD_COEF_UP_ZERO_MASK, + AL_ETH_KR_PMD_LD_COEF_UP_ZERO_SHIFT, + ldcoeff->c_zero); + + AL_REG_FIELD_SET(reg, + AL_ETH_KR_PMD_LD_COEF_UP_PLUS_MASK, + AL_ETH_KR_PMD_LD_COEF_UP_PLUS_SHIFT, + ldcoeff->c_plus); + + al_eth_pma_reg_write(adapter, + AL_ETH_KR_PMD_LD_COEF_UP, + reg); +} + +void al_eth_ld_status_report_set( + struct al_hal_eth_adapter *adapter, + struct al_eth_kr_status_report_data *status) +{ + uint16_t reg = 0; + + AL_REG_FIELD_SET(reg, + AL_ETH_KR_PMD_LD_STATUS_REPORT_MINUS_MASK, + AL_ETH_KR_PMD_LD_STATUS_REPORT_MINUS_SHIFT, + status->c_minus); + + AL_REG_FIELD_SET(reg, + AL_ETH_KR_PMD_LD_STATUS_REPORT_ZERO_MASK, + AL_ETH_KR_PMD_LD_STATUS_REPORT_ZERO_SHIFT, + status->c_zero); + + AL_REG_FIELD_SET(reg, + AL_ETH_KR_PMD_LD_STATUS_REPORT_PLUS_MASK, + AL_ETH_KR_PMD_LD_STATUS_REPORT_PLUS_SHIFT, + status->c_plus); + + if (status->receiver_ready) + AL_REG_BIT_SET(reg, + AL_ETH_KR_PMD_LD_STATUS_REPORT_RECEIVER_READY_SHIFT); + + al_eth_pma_reg_write(adapter, + AL_ETH_KR_PMD_LD_STATUS_REPORT, + reg); +} + +al_bool al_eth_kr_receiver_frame_lock_get(struct al_hal_eth_adapter *adapter) +{ + uint16_t reg; + + reg = al_eth_pma_reg_read(adapter, AL_ETH_KR_PMD_STATUS); + + return (AL_REG_BIT_GET(reg, + AL_ETH_KR_PMD_STATUS_RECEIVER_FRAME_LOCK_SHIFT) != 0); +} + +al_bool al_eth_kr_startup_proto_prog_get(struct al_hal_eth_adapter *adapter) +{ + uint16_t reg; + + reg = al_eth_pma_reg_read(adapter, AL_ETH_KR_PMD_STATUS); + + return (AL_REG_BIT_GET( + reg, 
AL_ETH_KR_PMD_STATUS_RECEIVER_START_UP_PROTO_PROG_SHIFT) != 0); +} + +al_bool al_eth_kr_training_status_fail_get(struct al_hal_eth_adapter *adapter) +{ + uint16_t reg; + + reg = al_eth_pma_reg_read(adapter, AL_ETH_KR_PMD_STATUS); + + return (AL_REG_BIT_GET(reg, AL_ETH_KR_PMD_STATUS_FAILURE_SHIFT) != 0); +} + +void al_eth_receiver_ready_set(struct al_hal_eth_adapter *adapter) +{ + al_eth_pma_reg_write(adapter, AL_ETH_KR_PMD_STATUS, 1); +} + +/*************************** auto negotiation *********************************/ +static int al_eth_kr_an_validate_adv(struct al_hal_eth_adapter *adapter, + struct al_eth_an_adv *an_adv) +{ + al_assert(adapter); + + if (an_adv == NULL) + return 0; + + if (an_adv->selector_field != 1) { + al_err("[%s]: %s failed on selector_field (%d)\n", + adapter->name, __func__, an_adv->selector_field); + return -EINVAL; + } + + if (an_adv->capability & AL_BIT(2)) { + al_err("[%s]: %s failed on capability bit 2 (%d)\n", + adapter->name, __func__, an_adv->capability); + return -EINVAL; + } + + if (an_adv->remote_fault) { + al_err("[%s]: %s failed on remote_fault (%d)\n", + adapter->name, __func__, an_adv->remote_fault); + return -EINVAL; + } + + if (an_adv->acknowledge) { + al_err("[%s]: %s failed on acknowledge (%d)\n", + adapter->name, __func__, an_adv->acknowledge); + return -EINVAL; + } + + if (an_adv->next_page) { + al_err("[%s]: %s failed on next_page (%d)\n", + adapter->name, __func__, an_adv->next_page); + return -EINVAL; + } + + if (an_adv->technology != AL_ETH_AN_TECH_10GBASE_KR) { + al_err("[%s]: %s failed on technology (0x%x)\n", + adapter->name, __func__, an_adv->technology); + return -EINVAL; + } + return 0; +} + +static int al_eth_kr_an_write_adv(struct al_hal_eth_adapter *adapter, + struct al_eth_an_adv *an_adv) +{ + uint16_t reg; + + if(an_adv == NULL) + return 0; + + reg = 0; + AL_REG_FIELD_SET(reg, AL_ETH_KR_AN_ADV1_SEL_FIELD_MASK, + AL_ETH_KR_AN_ADV1_SEL_FIELD_SHIFT, + an_adv->selector_field); + + AL_REG_FIELD_SET(reg, AL_ETH_KR_AN_ADV1_ECHOED_NONCE_MASK, + AL_ETH_KR_AN_ADV1_ECHOED_NONCE_SHIFT, + an_adv->echoed_nonce); + + AL_REG_FIELD_SET(reg, AL_ETH_KR_AN_ADV1_CAPABILITY_MASK, + AL_ETH_KR_AN_ADV1_CAPABILITY_SHIFT, + an_adv->capability); + + AL_REG_BIT_VAL_SET(reg, AL_ETH_KR_AN_ADV1_REM_FAULT_SHIFT, + an_adv->remote_fault); + + AL_REG_BIT_VAL_SET(reg, AL_ETH_KR_AN_ADV1_ACK_SHIFT, + an_adv->acknowledge); + + AL_REG_BIT_VAL_SET(reg, AL_ETH_KR_AN_ADV1_NEXT_PAGE_SHIFT, + an_adv->next_page); + + al_eth_an_reg_write(adapter, AL_ETH_KR_AN_ADV0, reg); + + reg = 0; + AL_REG_FIELD_SET(reg, AL_ETH_KR_AN_ADV2_TX_NONCE_MASK, + AL_ETH_KR_AN_ADV2_TX_NONCE_SHIFT, + an_adv->transmitted_nonce); + + AL_REG_FIELD_SET(reg, AL_ETH_KR_AN_ADV2_TECH_MASK, + AL_ETH_KR_AN_ADV2_TECH_SHIFT, + an_adv->technology); + + al_eth_an_reg_write(adapter, AL_ETH_KR_AN_ADV1, reg); + + reg = 0; + AL_REG_FIELD_SET(reg, AL_ETH_KR_AN_ADV3_TECH_MASK, + AL_ETH_KR_AN_ADV3_TECH_SHIFT, + an_adv->technology >> 11); + + AL_REG_FIELD_SET(reg, AL_ETH_KR_AN_ADV3_FEC_MASK, + AL_ETH_KR_AN_ADV3_FEC_SHIFT, + an_adv->fec_capability); + + al_eth_an_reg_write(adapter, AL_ETH_KR_AN_ADV2, reg); + + return 0; +} + +void al_eth_kr_an_read_adv(struct al_hal_eth_adapter *adapter, + struct al_eth_an_adv *an_adv) +{ + int16_t reg; + + al_assert(an_adv != NULL); + + + reg = al_eth_an_reg_read(adapter, AL_ETH_KR_AN_REM_ADV0); + + an_adv->selector_field = AL_REG_FIELD_GET(reg, + AL_ETH_KR_AN_ADV1_SEL_FIELD_MASK, + AL_ETH_KR_AN_ADV1_SEL_FIELD_SHIFT); + + an_adv->echoed_nonce = AL_REG_FIELD_GET(reg, + 
AL_ETH_KR_AN_ADV1_ECHOED_NONCE_MASK, + AL_ETH_KR_AN_ADV1_ECHOED_NONCE_SHIFT); + + an_adv->capability = AL_REG_FIELD_GET(reg, + AL_ETH_KR_AN_ADV1_CAPABILITY_MASK, + AL_ETH_KR_AN_ADV1_CAPABILITY_SHIFT); + + an_adv->remote_fault = AL_REG_BIT_GET(reg, + AL_ETH_KR_AN_ADV1_REM_FAULT_SHIFT); + + an_adv->acknowledge = AL_REG_BIT_GET(reg, + AL_ETH_KR_AN_ADV1_ACK_SHIFT); + + an_adv->next_page = AL_REG_BIT_GET(reg, + AL_ETH_KR_AN_ADV1_NEXT_PAGE_SHIFT); + + + reg = al_eth_an_reg_read(adapter, AL_ETH_KR_AN_REM_ADV1); + + an_adv->transmitted_nonce = AL_REG_FIELD_GET(reg, + AL_ETH_KR_AN_ADV2_TX_NONCE_MASK, + AL_ETH_KR_AN_ADV2_TX_NONCE_SHIFT); + + an_adv->technology = AL_REG_FIELD_GET(reg, + AL_ETH_KR_AN_ADV2_TECH_MASK, + AL_ETH_KR_AN_ADV2_TECH_SHIFT); + + + reg = al_eth_an_reg_read(adapter, AL_ETH_KR_AN_REM_ADV2); + + an_adv->technology |= (AL_REG_FIELD_GET(reg, + AL_ETH_KR_AN_ADV3_TECH_MASK, + AL_ETH_KR_AN_ADV3_TECH_SHIFT) << 11); + + an_adv->fec_capability = AL_REG_FIELD_GET(reg, + AL_ETH_KR_AN_ADV3_FEC_MASK, + AL_ETH_KR_AN_ADV3_FEC_SHIFT); +} + + +int al_eth_kr_an_init(struct al_hal_eth_adapter *adapter, + struct al_eth_an_adv *an_adv) +{ + int rc; + + rc = al_eth_kr_an_validate_adv(adapter, an_adv); + if (rc) + return rc; + + rc = al_eth_kr_an_write_adv(adapter, an_adv); + if (rc) + return rc; + + /* clear status */ + al_eth_an_reg_read(adapter, AL_ETH_KR_AN_STATUS); + + al_dbg("[%s]: autonegotiation initialized successfully", adapter->name); + return 0; +} + +int al_eth_kr_an_start(struct al_hal_eth_adapter *adapter, al_bool lt_enable) +{ + al_dbg("Eth [%s]: enable autonegotiation. lt_en %s", + adapter->name, (lt_enable == AL_TRUE) ? "yes" : "no"); + + al_eth_pma_reg_write(adapter, + AL_ETH_KR_PMD_CONTROL, + AL_BIT(AL_ETH_KR_PMD_CONTROL_RESTART)); + + al_eth_an_reg_write(adapter, + AL_ETH_KR_AN_CONTROL, + (AL_ETH_KR_AN_CONTROL_ENABLE | + AL_ETH_KR_AN_CONTROL_RESTART)); + + if (lt_enable == AL_TRUE) { + al_eth_kr_lt_initialize(adapter); + } + + return 0; +} + +void al_eth_kr_an_stop(struct al_hal_eth_adapter *adapter) +{ + al_eth_an_reg_write(adapter, AL_ETH_KR_AN_CONTROL, 0); +} + +void al_eth_kr_an_status_check(struct al_hal_eth_adapter *adapter, + al_bool *page_received, + al_bool *an_completed, + al_bool *error) +{ + uint16_t reg; + + reg = al_eth_an_reg_read(adapter, AL_ETH_KR_AN_STATUS); + + if ((reg & AL_ETH_KR_AN_STATUS_CHECK_MASK) != + AL_ETH_KR_AN_STATUS_CHECK_NO_ERROR) { + al_err("[%s]: %s AN_STATUS (0x%x) indicated error\n", + adapter->name, __func__, reg); + + *error = AL_TRUE; + } + + if (reg & AL_ETH_KR_AN_STATUS_BASE_PAGE_RECEIVED) + *page_received = AL_TRUE; + else + *page_received = AL_FALSE; + + if (reg & AL_ETH_KR_AN_STATUS_COMPLETED) + *an_completed = AL_TRUE; + else + *an_completed = AL_FALSE; +} + + +/****************************** KR Link Training *****************************/ +void al_eth_kr_lt_restart(struct al_hal_eth_adapter *adapter) +{ + al_dbg("[%s]: KR LT Restart Link Training.\n", adapter->name); + + al_eth_pma_reg_write(adapter, + AL_ETH_KR_PMD_CONTROL, + (AL_BIT(AL_ETH_KR_PMD_CONTROL_ENABLE) | + AL_BIT(AL_ETH_KR_PMD_CONTROL_RESTART))); +} + +void al_eth_kr_lt_stop(struct al_hal_eth_adapter *adapter) +{ + al_dbg("[%s]: KR LT Stop Link Training.\n", adapter->name); + + al_eth_pma_reg_write(adapter, + AL_ETH_KR_PMD_CONTROL, + AL_BIT(AL_ETH_KR_PMD_CONTROL_RESTART)); +} + +void al_eth_kr_lt_initialize(struct al_hal_eth_adapter *adapter) +{ + al_dbg("[%s]: KR LT Initialize.\n", adapter->name); + + /* Reset LT state machine */ + al_eth_kr_lt_stop(adapter); + + /* clear 
receiver status */ + al_eth_pma_reg_write(adapter, AL_ETH_KR_PMD_STATUS, 0); + + /* Coefficient Update to all zero (no command, hold) */ + al_eth_pma_reg_write(adapter, AL_ETH_KR_PMD_LD_COEF_UP, 0); + /* Coefficient Status to all zero (not_updated) */ + al_eth_pma_reg_write(adapter, AL_ETH_KR_PMD_LD_STATUS_REPORT, 0); + + /* start */ + al_eth_kr_lt_restart(adapter); +} + +al_bool al_eth_kr_lt_frame_lock_wait(struct al_hal_eth_adapter *adapter, + uint32_t timeout) +{ + uint32_t loop; + uint16_t reg = 0; + + for (loop = 0; loop < timeout; loop++) { + reg = al_eth_pma_reg_read(adapter, AL_ETH_KR_PMD_STATUS); + + if (AL_REG_BIT_GET(reg, AL_ETH_KR_PMD_STATUS_FAILURE_SHIFT)) { + al_info("[%s]: Failed on Training Failure." + " loops %d PMD STATUS 0x%04x\n", + adapter->name, loop, reg); + + return AL_FALSE; + } + if (AL_REG_BIT_GET(reg, + AL_ETH_KR_PMD_STATUS_RECEIVER_FRAME_LOCK_SHIFT)) { + al_dbg("[%s]: Frame lock received." + " loops %d PMD STATUS 0x%04x\n", + adapter->name, loop, reg); + + return AL_TRUE; + } + al_udelay(1); + } + al_info("[%s]: Failed on timeout. PMD STATUS 0x%04x\n", + adapter->name, reg); + + return AL_FALSE; +} diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_kr.h b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_kr.h new file mode 100644 index 00000000000000..cc41e526c19c13 --- /dev/null +++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_kr.h @@ -0,0 +1,307 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+*******************************************************************************/
+/**
+ * @defgroup group_eth_kr_api API
+ * Ethernet KR auto-neg and link-training driver API
+ * @ingroup group_eth
+ * @{
+ * @file al_hal_eth_kr.h
+ *
+ * @brief Header file for KR driver
+ *
+ */
+
+#ifndef __AL_HAL_ETH_KR_H__
+#define __AL_HAL_ETH_KR_H__
+
+#include "al_hal_eth.h"
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/* AN (Auto-negotiation) Advertisement Registers */
+struct al_eth_an_adv {
+ /* constant value defining 802.3ap support.
+ * The suggested value is 0x01. */
+ uint8_t selector_field;
+ /* Contains arbitrary data. */
+ uint8_t echoed_nonce;
+ /* pause capability. */
+ uint8_t capability;
+ /* Set to 1 to indicate a Remote Fault condition.
+ * Set to 0 to indicate normal operation. */
+ uint8_t remote_fault;
+ /* Should always be set to 0. */
+ uint8_t acknowledge;
+ /* Set to 1 to indicate that the device has next pages to send.
+ * Set to 0 to indicate that the device has no next pages to send. */
+ uint8_t next_page;
+ /* Must be set to an arbitrary value.
+ * Two devices must have a different nonce for autonegotiation to
+ * operate (a loopback will not allow autonegotiation to complete). */
+ uint8_t transmitted_nonce;
+ uint32_t technology;
+#define AL_ETH_AN_TECH_1000BASE_KX AL_BIT(0)
+#define AL_ETH_AN_TECH_10GBASE_KX4 AL_BIT(1)
+#define AL_ETH_AN_TECH_10GBASE_KR AL_BIT(2)
+#define AL_ETH_AN_TECH_40GBASE_KR4 AL_BIT(3)
+#define AL_ETH_AN_TECH_40GBASE_CR4 AL_BIT(4)
+#define AL_ETH_AN_TECH_100GBASE_CR AL_BIT(5)
+ uint8_t fec_capability;
+};
+
+enum al_eth_kr_cl72_cstate {
+ C72_CSTATE_NOT_UPDATED = 0,
+ C72_CSTATE_UPDATED = 1,
+ C72_CSTATE_MIN = 2,
+ C72_CSTATE_MAX = 3,
+};
+
+enum al_eth_kr_cl72_coef_op {
+ AL_PHY_KR_COEF_UP_HOLD = 0,
+ AL_PHY_KR_COEF_UP_INC = 1,
+ AL_PHY_KR_COEF_UP_DEC = 2,
+ AL_PHY_KR_COEF_UP_RESERVED = 3
+};
+
+struct al_eth_kr_coef_up_data {
+ enum al_eth_kr_cl72_coef_op c_zero;
+ enum al_eth_kr_cl72_coef_op c_plus;
+ enum al_eth_kr_cl72_coef_op c_minus;
+ al_bool preset;
+ al_bool initialize;
+};
+
+struct al_eth_kr_status_report_data {
+ enum al_eth_kr_cl72_cstate c_zero;
+ enum al_eth_kr_cl72_cstate c_plus;
+ enum al_eth_kr_cl72_cstate c_minus;
+ al_bool receiver_ready;
+};
+
+/**
+ * get the last received coefficient update message from the link partner
+ *
+ * @param adapter pointer to the private structure
+ * @param lpcoeff coeff update received
+ *
+ */
+void al_eth_lp_coeff_up_get(
+ struct al_hal_eth_adapter *adapter,
+ struct al_eth_kr_coef_up_data *lpcoeff);
+
+/**
+ * get the last received status report message from the link partner
+ *
+ * @param adapter pointer to the private structure
+ * @param status status report received
+ *
+ */
+void al_eth_lp_status_report_get(
+ struct al_hal_eth_adapter *adapter,
+ struct al_eth_kr_status_report_data *status);
+
+/**
+ * set the coefficient data for the next message that will be sent to lp
+ *
+ * @param adapter pointer to the private structure
+ * @param ldcoeff coeff update to send
+ *
+ */
+void al_eth_ld_coeff_up_set(
+ struct al_hal_eth_adapter *adapter,
+ struct al_eth_kr_coef_up_data *ldcoeff);
+
+/**
+ * set the status report message for the next message that will be sent to lp
+ *
+ * @param adapter pointer to the private structure
+ * @param status status report to send
+ *
+ */
+void al_eth_ld_status_report_set(
+ struct al_hal_eth_adapter *adapter,
+ struct al_eth_kr_status_report_data *status);
+
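+/**
+ * Example (editorial sketch, not part of the HAL): one CL72 response step.
+ * The local device reads the partner's coefficient requests and answers
+ * through its own status report; the actual SerDes tap adjustment is omitted
+ * here and every request is simply acknowledged as "updated" so the partner
+ * state machine can advance.
+ *
+ * @code
+ * static void my_lt_step(struct al_hal_eth_adapter *adapter)
+ * {
+ *	struct al_eth_kr_coef_up_data lpcoeff;
+ *	struct al_eth_kr_status_report_data report;
+ *
+ *	al_eth_lp_coeff_up_get(adapter, &lpcoeff);
+ *
+ *	report.c_zero = (lpcoeff.c_zero != AL_PHY_KR_COEF_UP_HOLD) ?
+ *			C72_CSTATE_UPDATED : C72_CSTATE_NOT_UPDATED;
+ *	report.c_plus = (lpcoeff.c_plus != AL_PHY_KR_COEF_UP_HOLD) ?
+ *			C72_CSTATE_UPDATED : C72_CSTATE_NOT_UPDATED;
+ *	report.c_minus = (lpcoeff.c_minus != AL_PHY_KR_COEF_UP_HOLD) ?
+ *			C72_CSTATE_UPDATED : C72_CSTATE_NOT_UPDATED;
+ *	report.receiver_ready = AL_FALSE;
+ *
+ *	al_eth_ld_status_report_set(adapter, &report);
+ * }
+ * @endcode
+ */
+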
+/**
+ * get the receiver frame lock status
+ *
+ * @param adapter pointer to the private structure
+ *
+ * @return true if Training frame delineation is detected, otherwise false.
+ */
+al_bool al_eth_kr_receiver_frame_lock_get(struct al_hal_eth_adapter *adapter);
+
+/**
+ * get the start up protocol progress status
+ *
+ * @param adapter pointer to the private structure
+ *
+ * @return true if the startup protocol is in progress.
+ */
+al_bool al_eth_kr_startup_proto_prog_get(struct al_hal_eth_adapter *adapter);
+
+/**
+ * indicate the receiver is ready (the link training is completed)
+ *
+ * @param adapter pointer to the private structure
+ *
+ */
+void al_eth_receiver_ready_set(struct al_hal_eth_adapter *adapter);
+
+/**
+ * read Training failure status.
+ *
+ * @param adapter pointer to the private structure
+ *
+ * @return true if Training failure has been detected.
+ */
+al_bool al_eth_kr_training_status_fail_get(struct al_hal_eth_adapter *adapter);
+
+/****************************** auto negotiation *******************************/
+/**
+ * Initialize Auto-negotiation
+ * - Program Ability Registers (Advertisement Registers)
+ * - Clear Status latches
+ * @param adapter pointer to the private structure
+ * @param an_adv pointer to the AN Advertisement Registers structure
+ * when NULL, the registers will not be updated.
+ *
+ * @return 0 on success, error code otherwise.
+ */
+int al_eth_kr_an_init(struct al_hal_eth_adapter *adapter,
+ struct al_eth_an_adv *an_adv);
+
+/**
+ * Enable/Restart Auto-negotiation
+ *
+ * @param adapter pointer to the private structure
+ * @param lt_enable initialize link training as well
+ *
+ * @return 0 on success, error code otherwise.
+ */
+int al_eth_kr_an_start(struct al_hal_eth_adapter *adapter,
+ al_bool lt_enable);
+
+/**
+ * Stop Auto-negotiation
+ *
+ * Stopping the auto-negotiation will prevent the MAC from sending the last
+ * page to the link partner in case it starts the AN again. It must be called
+ * after link training is completed, or the software will lose sync with the
+ * HW state machine.
+ *
+ * @param adapter pointer to the private structure
+ *
+ */
+void al_eth_kr_an_stop(struct al_hal_eth_adapter *adapter);
+
+/**
+ * Check Auto-negotiation event done
+ *
+ * @param adapter pointer to the private structure
+ * @param page_received Set to true if the AN page received indication is set.
+ * Set to false otherwise.
+ * @param an_completed Set to true if the AN completed indication is set.
+ * Set to false otherwise.
+ * @param error Set to true if any error encountered
+ *
+ */
+void al_eth_kr_an_status_check(struct al_hal_eth_adapter *adapter,
+ al_bool *page_received,
+ al_bool *an_completed,
+ al_bool *error);
+
+/**
+ * Read the remote auto-negotiation advertising.
+ * This function is safe to call after al_eth_kr_an_status_check returned
+ * with page_received set.
+ *
+ * @param adapter pointer to the private structure
+ * @param an_adv pointer to the AN Advertisement Registers structure
+ *
+ */
+void al_eth_kr_an_read_adv(struct al_hal_eth_adapter *adapter,
+ struct al_eth_an_adv *an_adv);
+
+/****************************** link training **********************************/
+/**
+ * Initialize Link-training.
+ * Clear the status register and set the local coefficient update and status
+ * to zero.
+ *
+ * @param adapter pointer to the private structure
+ *
+ */
+void al_eth_kr_lt_initialize(struct al_hal_eth_adapter *adapter);
+
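+/**
+ * Example (editorial sketch): how the AN and LT calls above compose into a
+ * bring-up flow. The polling loop and the timeout value are illustrative
+ * only, and the CL72 coefficient exchange is elided.
+ *
+ * @code
+ * rc = al_eth_kr_an_init(adapter, &adv);
+ * if (!rc)
+ *	rc = al_eth_kr_an_start(adapter, AL_TRUE);
+ * while (!rc && !page_received && !error)
+ *	al_eth_kr_an_status_check(adapter, &page_received,
+ *				  &an_completed, &error);
+ * al_eth_kr_an_read_adv(adapter, &partner_adv);
+ * if (al_eth_kr_lt_frame_lock_wait(adapter, 500)) {
+ *	... run the CL72 coefficient exchange here ...
+ *	al_eth_receiver_ready_set(adapter);
+ *	al_eth_kr_an_stop(adapter);
+ * }
+ * @endcode
+ */
+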
+/**
+ * Wait for frame lock.
+ *
+ * @param adapter pointer to the private structure
+ * @param timeout timeout in usec.
+ *
+ * @return true if frame lock received, false otherwise.
+ */
+al_bool al_eth_kr_lt_frame_lock_wait(struct al_hal_eth_adapter *adapter,
+ uint32_t timeout);
+
+/**
+ * reset the 10GBase-KR startup protocol and begin its operation
+ *
+ * @param adapter pointer to the private structure
+ *
+ */
+void al_eth_kr_lt_restart(struct al_hal_eth_adapter *adapter);
+
+/**
+ * reset the 10GBase-KR startup protocol and end its operation
+ *
+ * @param adapter pointer to the private structure
+ *
+ */
+void al_eth_kr_lt_stop(struct al_hal_eth_adapter *adapter);
+
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+#endif /*__AL_HAL_ETH_KR_H__*/
+/** @} end of Ethernet kr group */
diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_mac_regs.h b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_mac_regs.h
new file mode 100644
index 00000000000000..e75b2830572762
--- /dev/null
+++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_mac_regs.h
@@ -0,0 +1,1340 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +*******************************************************************************/ + +/** + * @{ + * @file al_hal_eth_mac_regs.h + * + * @brief Ethernet MAC registers + * + */ + +#ifndef __AL_HAL_ETH_MAC_REG_H +#define __AL_HAL_ETH_MAC_REG_H + +#ifdef __cplusplus +extern "C" { +#endif +/* +* Unit Registers +*/ + +struct al_eth_mac_1g { + /* [0x0] */ + uint32_t rev; + uint32_t scratch; + uint32_t cmd_cfg; + uint32_t mac_0; + /* [0x10] */ + uint32_t mac_1; + uint32_t frm_len; + uint32_t pause_quant; + uint32_t rx_section_empty; + /* [0x20] */ + uint32_t rx_section_full; + uint32_t tx_section_empty; + uint32_t tx_section_full; + uint32_t rx_almost_empty; + /* [0x30] */ + uint32_t rx_almost_full; + uint32_t tx_almost_empty; + uint32_t tx_almost_full; + uint32_t mdio_addr0; + /* [0x40] */ + uint32_t mdio_addr1; + uint32_t Reserved[5]; + /* [0x58] */ + uint32_t reg_stat; + uint32_t tx_ipg_len; + /* [0x60] */ + uint32_t Reserved1[104]; + /* [0x200] */ + uint32_t phy_regs_base; + uint32_t Reserved2[127]; +}; + +struct al_eth_mac_10g { + /* [0x0] */ + uint32_t rev; + uint32_t scratch; + uint32_t cmd_cfg; + uint32_t mac_0; + /* [0x10] */ + uint32_t mac_1; + uint32_t frm_len; + uint32_t Reserved; + uint32_t rx_fifo_sections; + /* [0x20] */ + uint32_t tx_fifo_sections; + uint32_t rx_fifo_almost_f_e; + uint32_t tx_fifo_almost_f_e; + uint32_t hashtable_load; + /* [0x30] */ + uint32_t mdio_cfg_status; + uint16_t mdio_cmd; + uint16_t reserved1; + uint16_t mdio_data; + uint16_t reserved2; + uint16_t mdio_regaddr; + uint16_t reserved3; + /* [0x40] */ + uint32_t status; + uint32_t tx_ipg_len; + uint32_t Reserved1[3]; + /* [0x54] */ + uint32_t cl01_pause_quanta; + uint32_t cl23_pause_quanta; + uint32_t cl45_pause_quanta; + /* [0x60] */ + uint32_t cl67_pause_quanta; + uint32_t cl01_quanta_thresh; + uint32_t cl23_quanta_thresh; + uint32_t cl45_quanta_thresh; + /* [0x70] */ + uint32_t cl67_quanta_thresh; + uint32_t rx_pause_status; + uint32_t Reserved2; + uint32_t ts_timestamp; + /* [0x80] */ + + uint32_t Reserved3[160]; + + /* [0x300] */ + uint32_t control; + uint32_t status_reg; + uint32_t phy_id[2]; + /* [0x310] */ + uint32_t dev_ability; + uint32_t partner_ability; + uint32_t an_expansion; + uint32_t device_np; + /* [0x320] */ + uint32_t partner_np; + uint32_t Reserved4[9]; + + /* [0x348] */ + uint32_t link_timer_lo; + uint32_t link_timer_hi; + /* [0x350] */ + uint32_t if_mode; + + uint32_t Reserved5[43]; +}; + +struct al_eth_mac_gen { + /* [0x0] Ethernet Controller Version */ + uint32_t version; + uint32_t rsrvd_0[2]; + /* [0xc] MAC selection configuration */ + uint32_t cfg; + /* [0x10] 10/100/1000 MAC external configuration */ + uint32_t mac_1g_cfg; + /* [0x14] 10/100/1000 MAC status */ + uint32_t mac_1g_stat; + /* [0x18] RGMII external configuration */ + uint32_t rgmii_cfg; + /* [0x1c] RGMII status */ + uint32_t rgmii_stat; + /* [0x20] 1/2.5/10G MAC external configuration */ + uint32_t mac_10g_cfg; + /* [0x24] 1/2.5/10G MAC status */ + uint32_t mac_10g_stat; + /* [0x28] XAUI PCS configuration */ + uint32_t xaui_cfg; + /* [0x2c] XAUI PCS status */ + uint32_t xaui_stat; + /* [0x30] RXAUI PCS configuration */ + uint32_t rxaui_cfg; + /* [0x34] RXAUI PCS status */ + uint32_t rxaui_stat; + /* [0x38] Signal detect configuration */ + uint32_t sd_cfg; + /* [0x3c] MDIO control register for MDIO interface 1 */ + uint32_t mdio_ctrl_1; + /* [0x40] MDIO information register for MDIO interface 1 */ + uint32_t mdio_1; + /* [0x44] MDIO control register for MDIO interface 2 */ + uint32_t mdio_ctrl_2; + /* [0x48] MDIO 
information register for MDIO interface 2 */ + uint32_t mdio_2; + /* [0x4c] XGMII 32 to 64 data FIFO control */ + uint32_t xgmii_dfifo_32_64; + /* [0x50] Reserved 1 out */ + uint32_t mac_res_1_out; + /* [0x54] XGMII 64 to 32 data FIFO control */ + uint32_t xgmii_dfifo_64_32; + /* [0x58] Reserved 1 in */ + uint32_t mac_res_1_in; + /* [0x5c] SerDes TX FIFO control */ + uint32_t sd_fifo_ctrl; + /* [0x60] SerDes TX FIFO status */ + uint32_t sd_fifo_stat; + /* [0x64] SerDes in/out selection */ + uint32_t mux_sel; + /* [0x68] Clock configuration */ + uint32_t clk_cfg; + uint32_t rsrvd_1; + /* [0x70] LOS and SD selection */ + uint32_t los_sel; + /* [0x74] RGMII selection configuration */ + uint32_t rgmii_sel; + /* [0x78] Ethernet LED configuration */ + uint32_t led_cfg; + uint32_t rsrvd[33]; +}; +struct al_eth_mac_kr { + /* [0x0] PCS register file address */ + uint32_t pcs_addr; + /* [0x4] PCS register file data */ + uint32_t pcs_data; + /* [0x8] AN register file address */ + uint32_t an_addr; + /* [0xc] AN register file data */ + uint32_t an_data; + /* [0x10] PMA register file address */ + uint32_t pma_addr; + /* [0x14] PMA register file data */ + uint32_t pma_data; + /* [0x18] MTIP register file address */ + uint32_t mtip_addr; + /* [0x1c] MTIP register file data */ + uint32_t mtip_data; + /* [0x20] KR PCS config */ + uint32_t pcs_cfg; + /* [0x24] KR PCS status */ + uint32_t pcs_stat; + uint32_t rsrvd[54]; +}; +struct al_eth_mac_sgmii { + /* [0x0] PCS register file address */ + uint32_t reg_addr; + /* [0x4] PCS register file data */ + uint32_t reg_data; + /* [0x8] PCS clock divider configuration */ + uint32_t clk_div; + /* [0xc] PCS Status */ + uint32_t link_stat; + uint32_t rsrvd[60]; +}; +struct al_eth_mac_stat { + /* [0x0] Receive rate matching error */ + uint32_t match_fault; + /* [0x4] EEE, number of times the MAC went into low power mo ... */ + uint32_t eee_in; + /* [0x8] EEE, number of times the MAC went out of low power ... 
*/ + uint32_t eee_out; + /* [0xc] 40G PCS, FEC corrected error indication */ + uint32_t v3_pcs_40g_ll_cerr_0; + /* [0x10] 40G PCS, FEC corrected error indication */ + uint32_t v3_pcs_40g_ll_cerr_1; + /* [0x14] 40G PCS, FEC corrected error indication */ + uint32_t v3_pcs_40g_ll_cerr_2; + /* [0x18] 40G PCS, FEC corrected error indication */ + uint32_t v3_pcs_40g_ll_cerr_3; + /* [0x1c] 40G PCS, FEC uncorrectable error indication */ + uint32_t v3_pcs_40g_ll_ncerr_0; + /* [0x20] 40G PCS, FEC uncorrectable error indication */ + uint32_t v3_pcs_40g_ll_ncerr_1; + /* [0x24] 40G PCS, FEC uncorrectable error indication */ + uint32_t v3_pcs_40g_ll_ncerr_2; + /* [0x28] 40G PCS, FEC uncorrectable error indication */ + uint32_t v3_pcs_40g_ll_ncerr_3; + /* [0x2c] 10G_LL PCS, FEC corrected error indication */ + uint32_t v3_pcs_10g_ll_cerr; + /* [0x30] 10G_LL PCS, FEC uncorrectable error indication */ + uint32_t v3_pcs_10g_ll_ncerr; + uint32_t rsrvd[51]; +}; +struct al_eth_mac_stat_lane { + /* [0x0] Character error */ + uint32_t char_err; + /* [0x4] Disparity error */ + uint32_t disp_err; + /* [0x8] Comma detection */ + uint32_t pat; + uint32_t rsrvd[13]; +}; +struct al_eth_mac_gen_v3 { + /* [0x0] ASYNC FIFOs control */ + uint32_t afifo_ctrl; + /* [0x4] TX ASYNC FIFO configuration */ + uint32_t tx_afifo_cfg_1; + /* [0x8] TX ASYNC FIFO configuration */ + uint32_t tx_afifo_cfg_2; + /* [0xc] TX ASYNC FIFO configuration */ + uint32_t tx_afifo_cfg_3; + /* [0x10] TX ASYNC FIFO configuration */ + uint32_t tx_afifo_cfg_4; + /* [0x14] TX ASYNC FIFO configuration */ + uint32_t tx_afifo_cfg_5; + /* [0x18] RX ASYNC FIFO configuration */ + uint32_t rx_afifo_cfg_1; + /* [0x1c] RX ASYNC FIFO configuration */ + uint32_t rx_afifo_cfg_2; + /* [0x20] RX ASYNC FIFO configuration */ + uint32_t rx_afifo_cfg_3; + /* [0x24] RX ASYNC FIFO configuration */ + uint32_t rx_afifo_cfg_4; + /* [0x28] RX ASYNC FIFO configuration */ + uint32_t rx_afifo_cfg_5; + /* [0x2c] MAC selection configuration */ + uint32_t mac_sel; + /* [0x30] 10G LL MAC configuration */ + uint32_t mac_10g_ll_cfg; + /* [0x34] 10G LL MAC control */ + uint32_t mac_10g_ll_ctrl; + /* [0x38] 10G LL PCS configuration */ + uint32_t pcs_10g_ll_cfg; + /* [0x3c] 10G LL PCS status */ + uint32_t pcs_10g_ll_status; + /* [0x40] 40G LL PCS configuration */ + uint32_t pcs_40g_ll_cfg; + /* [0x44] 40G LL PCS status */ + uint32_t pcs_40g_ll_status; + /* [0x48] PCS 40G register file address */ + uint32_t pcs_40g_ll_addr; + /* [0x4c] PCS 40G register file data */ + uint32_t pcs_40g_ll_data; + /* [0x50] 40G LL MAC configuration */ + uint32_t mac_40g_ll_cfg; + /* [0x54] 40G LL MAC status */ + uint32_t mac_40g_ll_status; + /* [0x58] Preamble configuration (high [55:32]) */ + uint32_t preamble_cfg_high; + /* [0x5c] Preamble configuration (low [31:0]) */ + uint32_t preamble_cfg_low; + /* [0x60] MAC 40G register file address */ + uint32_t mac_40g_ll_addr; + /* [0x64] MAC 40G register file data */ + uint32_t mac_40g_ll_data; + /* [0x68] 40G LL MAC control */ + uint32_t mac_40g_ll_ctrl; + /* [0x6c] PCS 40G register file address */ + uint32_t pcs_40g_fec_91_ll_addr; + /* [0x70] PCS 40G register file data */ + uint32_t pcs_40g_fec_91_ll_data; + /* [0x74] 40G LL PCS EEE configuration */ + uint32_t pcs_40g_ll_eee_cfg; + /* [0x78] 40G LL PCS EEE status */ + uint32_t pcs_40g_ll_eee_status; + /* [0x7c] SERDES 32-bit interface shift configuration (when s ... */ + uint32_t serdes_32_tx_shift; + /* [0x80] SERDES 32-bit interface shift configuration (when s ... 
*/ + uint32_t serdes_32_rx_shift; + /* [0x84] SERDES 32-bit interface bit selection */ + uint32_t serdes_32_tx_sel; + /* [0x88] SERDES 32-bit interface bit selection */ + uint32_t serdes_32_rx_sel; + /* [0x8c] AN/LT wrapper control */ + uint32_t an_lt_ctrl; + /* [0x90] AN/LT wrapper register file address */ + uint32_t an_lt_0_addr; + /* [0x94] AN/LT wrapper register file data */ + uint32_t an_lt_0_data; + /* [0x98] AN/LT wrapper register file address */ + uint32_t an_lt_1_addr; + /* [0x9c] AN/LT wrapper register file data */ + uint32_t an_lt_1_data; + /* [0xa0] AN/LT wrapper register file address */ + uint32_t an_lt_2_addr; + /* [0xa4] AN/LT wrapper register file data */ + uint32_t an_lt_2_data; + /* [0xa8] AN/LT wrapper register file address */ + uint32_t an_lt_3_addr; + /* [0xac] AN/LT wrapper register file data */ + uint32_t an_lt_3_data; + /* [0xb0] External SERDES control */ + uint32_t ext_serdes_ctrl; + uint32_t rsrvd[19]; +}; + +struct al_eth_mac_regs { + struct al_eth_mac_1g mac_1g; /* [0x000] */ + struct al_eth_mac_10g mac_10g; /* [0x400] */ + uint32_t rsrvd_0[64]; /* [0x800] */ + struct al_eth_mac_gen gen; /* [0x900] */ + struct al_eth_mac_kr kr; /* [0xa00] */ + struct al_eth_mac_sgmii sgmii; /* [0xb00] */ + struct al_eth_mac_stat stat; /* [0xc00] */ + struct al_eth_mac_stat_lane stat_lane[4]; /* [0xd00] */ + struct al_eth_mac_gen_v3 gen_v3; /* [0xe00] */ +}; + + +/* +* Registers Fields +*/ + +/**** control register (1G mac) ****/ +/* enable Half Duplex */ +#define AL_ETH_1G_MAC_CTRL_HD_EN (1 << 10) +/* enable 1G speed */ +#define AL_ETH_1G_MAC_CTRL_1G_SPD (1 << 3) +/* enable 10M speed */ +#define AL_ETH_1G_MAC_CTRL_10M_SPD (1 << 25) + + +/**** 10G MAC register ****/ +/* mdio_cfg_status */ +#define ETH_10G_MAC_MDIO_CFG_HOLD_TIME_MASK 0x0000001c +#define ETH_10G_MAC_MDIO_CFG_HOLD_TIME_SHIFT 2 + +#define ETH_10G_MAC_MDIO_CFG_HOLD_TIME_1_CLK 0 +#define ETH_10G_MAC_MDIO_CFG_HOLD_TIME_3_CLK 1 +#define ETH_10G_MAC_MDIO_CFG_HOLD_TIME_5_CLK 2 +#define ETH_10G_MAC_MDIO_CFG_HOLD_TIME_7_CLK 3 +#define ETH_10G_MAC_MDIO_CFG_HOLD_TIME_9_CLK 4 +#define ETH_10G_MAC_MDIO_CFG_HOLD_TIME_11_CLK 5 +#define ETH_10G_MAC_MDIO_CFG_HOLD_TIME_13_CLK 6 +#define ETH_10G_MAC_MDIO_CFG_HOLD_TIME_15_CLK 7 + +/**** version register ****/ +/* Revision number (Minor) */ +#define ETH_MAC_GEN_VERSION_RELEASE_NUM_MINOR_MASK 0x000000FF +#define ETH_MAC_GEN_VERSION_RELEASE_NUM_MINOR_SHIFT 0 +/* Revision number (Major) */ +#define ETH_MAC_GEN_VERSION_RELEASE_NUM_MAJOR_MASK 0x0000FF00 +#define ETH_MAC_GEN_VERSION_RELEASE_NUM_MAJOR_SHIFT 8 +/* Date of release */ +#define ETH_MAC_GEN_VERSION_DATE_DAY_MASK 0x001F0000 +#define ETH_MAC_GEN_VERSION_DATE_DAY_SHIFT 16 +/* Month of release */ +#define ETH_MAC_GEN_VERSION_DATA_MONTH_MASK 0x01E00000 +#define ETH_MAC_GEN_VERSION_DATA_MONTH_SHIFT 21 +/* Year of release (starting from 2000) */ +#define ETH_MAC_GEN_VERSION_DATE_YEAR_MASK 0x3E000000 +#define ETH_MAC_GEN_VERSION_DATE_YEAR_SHIFT 25 +/* Reserved */ +#define ETH_MAC_GEN_VERSION_RESERVED_MASK 0xC0000000 +#define ETH_MAC_GEN_VERSION_RESERVED_SHIFT 30 + +/**** cfg register ****/ +/* Selects between the 10/100/1000 MAC and the 1/2 */ +#define ETH_MAC_GEN_CFG_MAC_1_10 (1 << 0) +/* Selects the operation mode of the 1/2 */ +#define ETH_MAC_GEN_CFG_XGMII_SGMII_MASK 0x00000006 +#define ETH_MAC_GEN_CFG_XGMII_SGMII_SHIFT 1 +/* Selects the operation mode of the PCS: +0 - XAUI +1 - RXAUI */ +#define ETH_MAC_GEN_CFG_XAUI_RXAUI (1 << 3) +/* Swap bits of TBI (SGMII mode) interface */ +#define ETH_MAC_GEN_CFG_SWAP_TBI_RX (1 << 4) +/* 
Determines the offset of the TBI bus on the SerDes interface: ... */
+#define ETH_MAC_GEN_CFG_TBI_MSB_RX (1 << 5)
+/* Selects the SGMII PCS/MAC:0 – 10G MAC with SGMII1 – 1G MAC wi ... */
+#define ETH_MAC_GEN_CFG_SGMII_SEL (1 << 6)
+/* Selects between RGMII and SGMII for the 1G MAC:0 – RGMII1 – S ... */
+#define ETH_MAC_GEN_CFG_RGMII_SGMII_SEL (1 << 7)
+/* Swap bits of TBI (SGMII mode) interface */
+#define ETH_MAC_GEN_CFG_SWAP_TBI_TX (1 << 8)
+/* Determines the offset of the TBI bus on the SerDes interface: ... */
+#define ETH_MAC_GEN_CFG_TBI_MSB_TX (1 << 9)
+/* Selection between the MDIO from 10/100/1000 MAC or the 1/2 */
+#define ETH_MAC_GEN_CFG_MDIO_1_10 (1 << 10)
+/* Swap MDC output
+0 – Normal
+1 – Flipped */
+#define ETH_MAC_GEN_CFG_MDIO_POL (1 << 11)
+/* Swap bits on SerDes interface */
+#define ETH_MAC_GEN_CFG_SWAP_SERDES_RX_MASK 0x000F0000
+#define ETH_MAC_GEN_CFG_SWAP_SERDES_RX_SHIFT 16
+/* Swap bits on SerDes interface */
+#define ETH_MAC_GEN_CFG_SWAP_SERDES_TX_MASK 0x0F000000
+#define ETH_MAC_GEN_CFG_SWAP_SERDES_TX_SHIFT 24
+
+/**** mac_1g_cfg register ****/
+/* Selection of the input for the "set_1000" input of the Ethern ... */
+#define ETH_MAC_GEN_MAC_1G_CFG_SET_1000_SEL (1 << 0)
+/* Default value for the 10/100/1000 MAC "set_1000" input */
+#define ETH_MAC_GEN_MAC_1G_CFG_SET_1000_DEF (1 << 1)
+/* Selection of the input for the "set_10" input of the Ethernet ... */
+#define ETH_MAC_GEN_MAC_1G_CFG_SET_10_SEL (1 << 4)
+/* Default value for the 10/100/1000 MAC "set_10" input */
+#define ETH_MAC_GEN_MAC_1G_CFG_SET_10_DEF (1 << 5)
+/* Transmit low power enable */
+#define ETH_MAC_GEN_MAC_1G_CFG_LOWP_ENA (1 << 8)
+/* Enable magic packet mode:0 - Sleep mode 1 - Normal operation ... */
+#define ETH_MAC_GEN_MAC_1G_CFG_SLEEPN (1 << 9)
+/* Swap ff_tx_crc input */
+#define ETH_MAC_GEN_MAC_1G_CFG_SWAP_FF_TX_CRC (1 << 12)
+
+/**** mac_1g_stat register ****/
+/* Status of the en_10 output from the 10/100/1000 MAC */
+#define ETH_MAC_GEN_MAC_1G_STAT_EN_10 (1 << 0)
+/* Status of the eth_mode output from the 10/100/1000 MAC */
+#define ETH_MAC_GEN_MAC_1G_STAT_ETH_MODE (1 << 1)
+/* Status of the lowp output from the 10/100/1000 MAC */
+#define ETH_MAC_GEN_MAC_1G_STAT_LOWP (1 << 4)
+/* Status of the wakeup output from the 10/100/1000 MAC */
+#define ETH_MAC_GEN_MAC_1G_STAT_WAKEUP (1 << 5)
+
+/**** rgmii_cfg register ****/
+/* Selection of the input for the "set_1000" input of the RGMII ... */
+#define ETH_MAC_GEN_RGMII_CFG_SET_1000_SEL (1 << 0)
+/* Default value for the RGMII converter "set_1000" input */
+#define ETH_MAC_GEN_RGMII_CFG_SET_1000_DEF (1 << 1)
+/* Selection of the input for the "set_10" input of the RGMII co ... */
+#define ETH_MAC_GEN_RGMII_CFG_SET_10_SEL (1 << 4)
+/* Default value for the 10/100/1000 MAC "set_10" input */
+#define ETH_MAC_GEN_RGMII_CFG_SET_10_DEF (1 << 5)
+/* Enable automatic speed selection (based on PHY in-band status ... */
+#define ETH_MAC_GEN_RGMII_CFG_ENA_AUTO (1 << 8)
+/* Force full duplex, only valid when ena_auto is '1'. */
+#define ETH_MAC_GEN_RGMII_CFG_SET_FD (1 << 9)
+
+/**** rgmii_stat register ****/
+/* Status of the speed output from the RGMII converter 00 - 10 M ... */
+#define ETH_MAC_GEN_RGMII_STAT_SPEED_MASK 0x00000003
+#define ETH_MAC_GEN_RGMII_STAT_SPEED_SHIFT 0
+/* Link indication from the RGMII converter (valid only if the e ... */
+#define ETH_MAC_GEN_RGMII_STAT_LINK (1 << 4)
+/* Full duplex indication from the RGMII converter (valid only i ... */
+#define ETH_MAC_GEN_RGMII_STAT_DUP (1 << 5)
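+
+/*
+ * Example (editorial sketch): the MASK/SHIFT pairs are consumed with the
+ * usual extract pattern. Decoding the RGMII status word ("val" is assumed
+ * to have been read from the rgmii_stat register already; the 2-bit speed
+ * encoding follows the truncated comment above, 00 = 10M):
+ *
+ *	speed = (val & ETH_MAC_GEN_RGMII_STAT_SPEED_MASK) >>
+ *		ETH_MAC_GEN_RGMII_STAT_SPEED_SHIFT;
+ *	link_up = !!(val & ETH_MAC_GEN_RGMII_STAT_LINK);
+ *	full_duplex = !!(val & ETH_MAC_GEN_RGMII_STAT_DUP);
+ */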
+
+/**** mac_10g_cfg register ****/
+/* Instruct the XGMII to transmit local fault. */
+#define ETH_MAC_GEN_MAC_10G_CFG_TX_LOC_FAULT (1 << 0)
+/* Instruct the XGMII to transmit remote fault. */
+#define ETH_MAC_GEN_MAC_10G_CFG_TX_REM_FAULT (1 << 1)
+/* Instruct the XGMII to transmit link fault. */
+#define ETH_MAC_GEN_MAC_10G_CFG_TX_LI_FAULT (1 << 2)
+/* Synchronous reset for the PCS layer */
+#define ETH_MAC_GEN_MAC_10G_CFG_SG_SRESET (1 << 3)
+/* PHY LOS indication selection00 - Select register value from p ... */
+#define ETH_MAC_GEN_MAC_10G_CFG_PHY_LOS_SEL_MASK 0x00000030
+#define ETH_MAC_GEN_MAC_10G_CFG_PHY_LOS_SEL_SHIFT 4
+/* Default value for PHY LOS indication */
+#define ETH_MAC_GEN_MAC_10G_CFG_PHY_LOS_DEF (1 << 6)
+/* Reverse polarity of the LOS signal from the SerDes */
+#define ETH_MAC_GEN_MAC_10G_CFG_PHY_LOS_POL (1 << 7)
+/* Transmit low power enable */
+#define ETH_MAC_GEN_MAC_10G_CFG_LOWP_ENA (1 << 8)
+/* Swap ff_tx_crc input */
+#define ETH_MAC_GEN_MAC_10G_CFG_SWAP_FF_TX_CRC (1 << 12)
+
+/**** mac_10g_stat register ****/
+/* XGMII RS detects local fault */
+#define ETH_MAC_GEN_MAC_10G_STAT_LOC_FAULT (1 << 0)
+/* XGMII RS detects remote fault */
+#define ETH_MAC_GEN_MAC_10G_STAT_REM_FAULT (1 << 1)
+/* XGMII RS detects link fault */
+#define ETH_MAC_GEN_MAC_10G_STAT_LI_FAULT (1 << 2)
+/* PFC mode */
+#define ETH_MAC_GEN_MAC_10G_STAT_PFC_MODE (1 << 3)
+
+#define ETH_MAC_GEN_MAC_10G_STAT_SG_ENA (1 << 4)
+
+#define ETH_MAC_GEN_MAC_10G_STAT_SG_ANDONE (1 << 5)
+
+#define ETH_MAC_GEN_MAC_10G_STAT_SG_SYNC (1 << 6)
+
+#define ETH_MAC_GEN_MAC_10G_STAT_SG_SPEED_MASK 0x00000180
+#define ETH_MAC_GEN_MAC_10G_STAT_SG_SPEED_SHIFT 7
+/* Status of the lowp output from the 1/2.5/10G MAC */
+#define ETH_MAC_GEN_MAC_10G_STAT_LOWP (1 << 9)
+/* Status of the ts_avail output from the 1/2.5/10G MAC */
+#define ETH_MAC_GEN_MAC_10G_STAT_TS_AVAIL (1 << 10)
+/* Transmit pause indication */
+#define ETH_MAC_GEN_MAC_10G_STAT_PAUSE_ON_MASK 0xFF000000
+#define ETH_MAC_GEN_MAC_10G_STAT_PAUSE_ON_SHIFT 24
+
+/**** xaui_cfg register ****/
+/* Increase rate matching FIFO threshold */
+#define ETH_MAC_GEN_XAUI_CFG_JUMBO_EN (1 << 0)
+
+/**** xaui_stat register ****/
+/* Lane alignment status */
+#define ETH_MAC_GEN_XAUI_STAT_ALIGN_DONE (1 << 0)
+/* Lane synchronization */
+#define ETH_MAC_GEN_XAUI_STAT_SYNC_MASK 0x000000F0
+#define ETH_MAC_GEN_XAUI_STAT_SYNC_SHIFT 4
+/* Code group alignment indication */
+#define ETH_MAC_GEN_XAUI_STAT_CG_ALIGN_MASK 0x00000F00
+#define ETH_MAC_GEN_XAUI_STAT_CG_ALIGN_SHIFT 8
+
+/**** rxaui_cfg register ****/
+/* Increase rate matching FIFO threshold */
+#define ETH_MAC_GEN_RXAUI_CFG_JUMBO_EN (1 << 0)
+/* Scrambler enable */
+#define ETH_MAC_GEN_RXAUI_CFG_SRBL_EN (1 << 1)
+/* Disparity calculation across lanes enabled */
+#define ETH_MAC_GEN_RXAUI_CFG_DISP_ACROSS_LANE (1 << 2)
+
+/**** rxaui_stat register ****/
+/* Lane alignment status */
+#define ETH_MAC_GEN_RXAUI_STAT_ALIGN_DONE (1 << 0)
+/* Lane synchronization */
+#define ETH_MAC_GEN_RXAUI_STAT_SYNC_MASK 0x000000F0
+#define ETH_MAC_GEN_RXAUI_STAT_SYNC_SHIFT 4
+/* Code group alignment indication */
+#define ETH_MAC_GEN_RXAUI_STAT_CG_ALIGN_MASK 0x00000F00
+#define ETH_MAC_GEN_RXAUI_STAT_CG_ALIGN_SHIFT 8
+
+/**** sd_cfg register ****/
+/* Signal detect selection
+0 - from register
+1 - from SerDes */
+#define ETH_MAC_GEN_SD_CFG_SEL_MASK 0x0000000F
+#define ETH_MAC_GEN_SD_CFG_SEL_SHIFT 0
+/* Signal detect value */
+#define ETH_MAC_GEN_SD_CFG_VAL_MASK 0x000000F0
+#define
ETH_MAC_GEN_SD_CFG_VAL_SHIFT 4 +/* Signal detect revers polarity (reverse polarity of signal fro ... */ +#define ETH_MAC_GEN_SD_CFG_POL_MASK 0x00000F00 +#define ETH_MAC_GEN_SD_CFG_POL_SHIFT 8 + +/**** mdio_ctrl_1 register ****/ +/* Available indication0 - The port was available and it is capt ... */ +#define ETH_MAC_GEN_MDIO_CTRL_1_AVAIL (1 << 0) + +/**** mdio_1 register ****/ +/* Current Ethernet interface number that controls the MDIO port ... */ +#define ETH_MAC_GEN_MDIO_1_INFO_MASK 0x000000FF +#define ETH_MAC_GEN_MDIO_1_INFO_SHIFT 0 + +/**** mdio_ctrl_2 register ****/ +/* Available indication0 - The port was available and it is capt ... */ +#define ETH_MAC_GEN_MDIO_CTRL_2_AVAIL (1 << 0) + +/**** mdio_2 register ****/ +/* Current Ethernet interface number that controls the MDIO port ... */ +#define ETH_MAC_GEN_MDIO_2_INFO_MASK 0x000000FF +#define ETH_MAC_GEN_MDIO_2_INFO_SHIFT 0 + +/**** xgmii_dfifo_32_64 register ****/ +/* FIFO enable */ +#define ETH_MAC_GEN_XGMII_DFIFO_32_64_ENABLE (1 << 0) +/* Read Write command every 2 cycles */ +#define ETH_MAC_GEN_XGMII_DFIFO_32_64_RW_2_CYCLES (1 << 1) +/* Swap LSB MSB when creating wider bus */ +#define ETH_MAC_GEN_XGMII_DFIFO_32_64_SWAP_LSB_MSB (1 << 2) +/* Software reset */ +#define ETH_MAC_GEN_XGMII_DFIFO_32_64_SW_RESET (1 << 4) +/* Read threshold */ +#define ETH_MAC_GEN_XGMII_DFIFO_32_64_READ_TH_MASK 0x0000FF00 +#define ETH_MAC_GEN_XGMII_DFIFO_32_64_READ_TH_SHIFT 8 +/* FIFO used */ +#define ETH_MAC_GEN_XGMII_DFIFO_32_64_USED_MASK 0x00FF0000 +#define ETH_MAC_GEN_XGMII_DFIFO_32_64_USED_SHIFT 16 + +/**** xgmii_dfifo_64_32 register ****/ +/* FIFO enable */ +#define ETH_MAC_GEN_XGMII_DFIFO_64_32_ENABLE (1 << 0) +/* Read Write command every 2 cycles */ +#define ETH_MAC_GEN_XGMII_DFIFO_64_32_RW_2_CYCLES (1 << 1) +/* Swap LSB MSB when creating wider bus */ +#define ETH_MAC_GEN_XGMII_DFIFO_64_32_SWAP_LSB_MSB (1 << 2) +/* Software reset */ +#define ETH_MAC_GEN_XGMII_DFIFO_64_32_SW_RESET (1 << 4) +/* Read threshold */ +#define ETH_MAC_GEN_XGMII_DFIFO_64_32_READ_TH_MASK 0x0000FF00 +#define ETH_MAC_GEN_XGMII_DFIFO_64_32_READ_TH_SHIFT 8 +/* FIFO used */ +#define ETH_MAC_GEN_XGMII_DFIFO_64_32_USED_MASK 0x00FF0000 +#define ETH_MAC_GEN_XGMII_DFIFO_64_32_USED_SHIFT 16 + +/**** sd_fifo_ctrl register ****/ +/* FIFO enable */ +#define ETH_MAC_GEN_SD_FIFO_CTRL_ENABLE_MASK 0x0000000F +#define ETH_MAC_GEN_SD_FIFO_CTRL_ENABLE_SHIFT 0 +/* Software reset */ +#define ETH_MAC_GEN_SD_FIFO_CTRL_SW_RESET_MASK 0x000000F0 +#define ETH_MAC_GEN_SD_FIFO_CTRL_SW_RESET_SHIFT 4 +/* Read threshold */ +#define ETH_MAC_GEN_SD_FIFO_CTRL_READ_TH_MASK 0x0000FF00 +#define ETH_MAC_GEN_SD_FIFO_CTRL_READ_TH_SHIFT 8 + +/**** sd_fifo_stat register ****/ +/* FIFO 0 used */ +#define ETH_MAC_GEN_SD_FIFO_STAT_USED_0_MASK 0x000000FF +#define ETH_MAC_GEN_SD_FIFO_STAT_USED_0_SHIFT 0 +/* FIFO 1 used */ +#define ETH_MAC_GEN_SD_FIFO_STAT_USED_1_MASK 0x0000FF00 +#define ETH_MAC_GEN_SD_FIFO_STAT_USED_1_SHIFT 8 +/* FIFO 2 used */ +#define ETH_MAC_GEN_SD_FIFO_STAT_USED_2_MASK 0x00FF0000 +#define ETH_MAC_GEN_SD_FIFO_STAT_USED_2_SHIFT 16 +/* FIFO 3 used */ +#define ETH_MAC_GEN_SD_FIFO_STAT_USED_3_MASK 0xFF000000 +#define ETH_MAC_GEN_SD_FIFO_STAT_USED_3_SHIFT 24 + +/**** mux_sel register ****/ +/* SGMII input selection selector00 – SerDes 001 – SerDes 110 – ... */ +#define ETH_MAC_GEN_MUX_SEL_SGMII_IN_MASK 0x00000003 +#define ETH_MAC_GEN_MUX_SEL_SGMII_IN_SHIFT 0 +/* RXAUI Lane 0 Input00 – SerDes 001 – SerDes 110 – SerDes 211 – ... 
*/ +#define ETH_MAC_GEN_MUX_SEL_RXAUI_0_IN_MASK 0x0000000C +#define ETH_MAC_GEN_MUX_SEL_RXAUI_0_IN_SHIFT 2 +/* RXAUI Lane 1 Input00 – SERDES 001 – SERDES 110 – SERDES 211 – ... */ +#define ETH_MAC_GEN_MUX_SEL_RXAUI_1_IN_MASK 0x00000030 +#define ETH_MAC_GEN_MUX_SEL_RXAUI_1_IN_SHIFT 4 +/* XAUI Lane 0 Input00 – SERDES 001 – SERDES 110 – SERDES 211 – ... */ +#define ETH_MAC_GEN_MUX_SEL_XAUI_0_IN_MASK 0x000000C0 +#define ETH_MAC_GEN_MUX_SEL_XAUI_0_IN_SHIFT 6 +/* XAUI Lane 1 Input00 – SERDES 001 – SERDES 110 – SERDES 211 – ... */ +#define ETH_MAC_GEN_MUX_SEL_XAUI_1_IN_MASK 0x00000300 +#define ETH_MAC_GEN_MUX_SEL_XAUI_1_IN_SHIFT 8 +/* XAUI Lane 2 Input00 – SERDES 001 – SERDES 110 – SERDES 211 – ... */ +#define ETH_MAC_GEN_MUX_SEL_XAUI_2_IN_MASK 0x00000C00 +#define ETH_MAC_GEN_MUX_SEL_XAUI_2_IN_SHIFT 10 +/* XAUI Lane 3 Input00 – SERDES 001 – SERDES 110 – SERDES 211 – ... */ +#define ETH_MAC_GEN_MUX_SEL_XAUI_3_IN_MASK 0x00003000 +#define ETH_MAC_GEN_MUX_SEL_XAUI_3_IN_SHIFT 12 +/* KR PCS Input 00 - SERDES 0 01 - SERDES 1 10 - SERDES 2 11 - S ... */ +#define ETH_MAC_GEN_MUX_SEL_KR_IN_MASK 0x0000C000 +#define ETH_MAC_GEN_MUX_SEL_KR_IN_SHIFT 14 +/* SerDes 0 input selection (TX)000 – XAUI lane 0 001 – XAUI lan ... */ +#define ETH_MAC_GEN_MUX_SEL_SERDES_0_TX_MASK 0x00070000 +#define ETH_MAC_GEN_MUX_SEL_SERDES_0_TX_SHIFT 16 +/* SERDES 1 input selection (Tx)000 – XAUI lane 0 001 – XAUI lan ... */ +#define ETH_MAC_GEN_MUX_SEL_SERDES_1_TX_MASK 0x00380000 +#define ETH_MAC_GEN_MUX_SEL_SERDES_1_TX_SHIFT 19 +/* SerDes 2 input selection (Tx)000 – XAUI lane 0 001 – XAUI lan ... */ +#define ETH_MAC_GEN_MUX_SEL_SERDES_2_TX_MASK 0x01C00000 +#define ETH_MAC_GEN_MUX_SEL_SERDES_2_TX_SHIFT 22 +/* SerDes 3 input selection (Tx)000 – XAUI lane 0 001 – XAUI lan ... */ +#define ETH_MAC_GEN_MUX_SEL_SERDES_3_TX_MASK 0x0E000000 +#define ETH_MAC_GEN_MUX_SEL_SERDES_3_TX_SHIFT 25 + +/**** clk_cfg register ****/ +/* Rx/Tx lane 0 clock MUX selectmust be aligned with data select ... */ +#define ETH_MAC_GEN_CLK_CFG_LANE_0_CLK_SEL_MASK 0x00000003 +#define ETH_MAC_GEN_CLK_CFG_LANE_0_CLK_SEL_SHIFT 0 +/* Rx/Tx lane 0 clock MUX select must be aligned with data selec ... */ +#define ETH_MAC_GEN_CLK_CFG_LANE_1_CLK_SEL_MASK 0x00000030 +#define ETH_MAC_GEN_CLK_CFG_LANE_1_CLK_SEL_SHIFT 4 +/* RX/TX lane 0 clock MUX select (should be aligned with data se ... */ +#define ETH_MAC_GEN_CLK_CFG_LANE_2_CLK_SEL_MASK 0x00000300 +#define ETH_MAC_GEN_CLK_CFG_LANE_2_CLK_SEL_SHIFT 8 +/* Rx/Tx lane 0 clock MUX select must be aligned with data selec ... */ +#define ETH_MAC_GEN_CLK_CFG_LANE_3_CLK_SEL_MASK 0x00003000 +#define ETH_MAC_GEN_CLK_CFG_LANE_3_CLK_SEL_SHIFT 12 +/* MAC GMII Rx clock MUX select must be aligned with data select ... */ +#define ETH_MAC_GEN_CLK_CFG_GMII_RX_CLK_SEL (1 << 16) +/* MAC GMII Tx clock MUX select (should be aligned with data sel ... */ +#define ETH_MAC_GEN_CLK_CFG_GMII_TX_CLK_SEL (1 << 18) +/* Tx clock MUX select,Selects the internal clock for the Tx dat ... */ +#define ETH_MAC_GEN_CLK_CFG_TX_CLK_SEL (1 << 28) +/* Rxclock MUX selectSelects the internal clock for the Rx data ... */ +#define ETH_MAC_GEN_CLK_CFG_RX_CLK_SEL (1 << 30) + +/**** los_sel register ****/ +/* Selected LOS/SD select00 – SerDes 0 01 – SerDes 110 – SerDes ... */ +#define ETH_MAC_GEN_LOS_SEL_LANE_0_SEL_MASK 0x00000003 +#define ETH_MAC_GEN_LOS_SEL_LANE_0_SEL_SHIFT 0 +/* Selected LOS/SD select 00 - SerDes 0 01 - SerDes 1 10 - SerD ... 
*/
+#define ETH_MAC_GEN_LOS_SEL_LANE_1_SEL_MASK 0x00000030
+#define ETH_MAC_GEN_LOS_SEL_LANE_1_SEL_SHIFT 4
+/* Selected LOS/SD select 00 - SerDes 0 01 - SerDes 1 10 - SerD ... */
+#define ETH_MAC_GEN_LOS_SEL_LANE_2_SEL_MASK 0x00000300
+#define ETH_MAC_GEN_LOS_SEL_LANE_2_SEL_SHIFT 8
+/* Selected LOS/SD select 00 - SerDes 0 01 - SerDes 1 10 - SerD ... */
+#define ETH_MAC_GEN_LOS_SEL_LANE_3_SEL_MASK 0x00003000
+#define ETH_MAC_GEN_LOS_SEL_LANE_3_SEL_SHIFT 12
+
+/**** rgmii_sel register ****/
+/* Swap [3:0] input with [7:4] */
+#define ETH_MAC_GEN_RGMII_SEL_RX_SWAP_3_0 (1 << 0)
+/* Swap [4] input with [9] */
+#define ETH_MAC_GEN_RGMII_SEL_RX_SWAP_4 (1 << 1)
+/* Swap [7:4] input with [3:0] */
+#define ETH_MAC_GEN_RGMII_SEL_RX_SWAP_7_3 (1 << 2)
+/* Swap [9] input with [4] */
+#define ETH_MAC_GEN_RGMII_SEL_RX_SWAP_9 (1 << 3)
+/* Swap [3:0] input with [7:4] */
+#define ETH_MAC_GEN_RGMII_SEL_TX_SWAP_3_0 (1 << 4)
+/* Swap [4] input with [9] */
+#define ETH_MAC_GEN_RGMII_SEL_TX_SWAP_4 (1 << 5)
+/* Swap [7:4] input with [3:0] */
+#define ETH_MAC_GEN_RGMII_SEL_TX_SWAP_7_3 (1 << 6)
+/* Swap [9] input with [4] */
+#define ETH_MAC_GEN_RGMII_SEL_TX_SWAP_9 (1 << 7)
+
+/**** led_cfg register ****/
+/* LED source selection:0 – Default reg1 – Rx activity2 – Tx act ... */
+#define ETH_MAC_GEN_LED_CFG_SEL_MASK 0x0000000F
+#define ETH_MAC_GEN_LED_CFG_SEL_SHIFT 0
+
+/* turn the led on/off based on default value field (ETH_MAC_GEN_LED_CFG_DEF) */
+#define ETH_MAC_GEN_LED_CFG_SEL_DEFAULT_REG 0
+#define ETH_MAC_GEN_LED_CFG_SEL_RX_ACTIVITY_DEPRECIATED 1
+#define ETH_MAC_GEN_LED_CFG_SEL_TX_ACTIVITY_DEPRECIATED 2
+#define ETH_MAC_GEN_LED_CFG_SEL_RX_TX_ACTIVITY_DEPRECIATED 3
+#define ETH_MAC_GEN_LED_CFG_SEL_LINK_ACTIVITY 10
+
+/* LED default value */
+#define ETH_MAC_GEN_LED_CFG_DEF (1 << 4)
+/* LED signal polarity */
+#define ETH_MAC_GEN_LED_CFG_POL (1 << 5)
+/* activity timer (MSB)
+32 bit timer @SB clock */
+#define ETH_MAC_GEN_LED_CFG_ACT_TIMER_MASK 0x00FF0000
+#define ETH_MAC_GEN_LED_CFG_ACT_TIMER_SHIFT 16
+/* blink timer (MSB)
+32 bit timer @SB clock */
+#define ETH_MAC_GEN_LED_CFG_BLINK_TIMER_MASK 0xFF000000
+#define ETH_MAC_GEN_LED_CFG_BLINK_TIMER_SHIFT 24
+
+/**** pcs_addr register ****/
+/* Address value */
+#define ETH_MAC_KR_PCS_ADDR_VAL_MASK 0x0000FFFF
+#define ETH_MAC_KR_PCS_ADDR_VAL_SHIFT 0
+
+/**** pcs_data register ****/
+/* Data value */
+#define ETH_MAC_KR_PCS_DATA_VAL_MASK 0x0000FFFF
+#define ETH_MAC_KR_PCS_DATA_VAL_SHIFT 0
+
+/**** an_addr register ****/
+/* Address value */
+#define ETH_MAC_KR_AN_ADDR_VAL_MASK 0x0000FFFF
+#define ETH_MAC_KR_AN_ADDR_VAL_SHIFT 0
+
+/**** an_data register ****/
+/* Data value */
+#define ETH_MAC_KR_AN_DATA_VAL_MASK 0x0000FFFF
+#define ETH_MAC_KR_AN_DATA_VAL_SHIFT 0
+
+/**** pma_addr register ****/
+/* Address value */
+#define ETH_MAC_KR_PMA_ADDR_VAL_MASK 0x0000FFFF
+#define ETH_MAC_KR_PMA_ADDR_VAL_SHIFT 0
+
+/**** pma_data register ****/
+/* Data value */
+#define ETH_MAC_KR_PMA_DATA_VAL_MASK 0x0000FFFF
+#define ETH_MAC_KR_PMA_DATA_VAL_SHIFT 0
+
+/**** mtip_addr register ****/
+/* Address value */
+#define ETH_MAC_KR_MTIP_ADDR_VAL_MASK 0x0000FFFF
+#define ETH_MAC_KR_MTIP_ADDR_VAL_SHIFT 0
+
+/**** mtip_data register ****/
+/* Data value */
+#define ETH_MAC_KR_MTIP_DATA_VAL_MASK 0x0000FFFF
+#define ETH_MAC_KR_MTIP_DATA_VAL_SHIFT 0
+
+/**** pcs_cfg register ****/
+/* Enable Auto-Negotiation after Reset */
+#define ETH_MAC_KR_PCS_CFG_STRAP_AN_ENA (1 << 0)
+/* Signal detect selector for the EEE0 – Register default value1 ...
*/ +#define ETH_MAC_KR_PCS_CFG_EEE_SD_SEL (1 << 4) +/* Signal detect default value for the EEE */ +#define ETH_MAC_KR_PCS_CFG_EEE_DEF_VAL (1 << 5) +/* Signal detect polarity reversal for the EEE */ +#define ETH_MAC_KR_PCS_CFG_EEE_SD_POL (1 << 6) +/* EEE timer value */ +#define ETH_MAC_KR_PCS_CFG_EEE_TIMER_VAL_MASK 0x0000FF00 +#define ETH_MAC_KR_PCS_CFG_EEE_TIMER_VAL_SHIFT 8 +/* Selects source for the enable SerDes DME signal0 – Register v ... */ +#define ETH_MAC_KR_PCS_CFG_DME_SEL (1 << 16) +/* DME default value */ +#define ETH_MAC_KR_PCS_CFG_DME_VAL (1 << 17) +/* DME default polarity reversal when selecting PCS output */ +#define ETH_MAC_KR_PCS_CFG_DME_POL (1 << 18) + +/**** pcs_stat register ****/ +/* Link enable by the Auto-Negotiation */ +#define ETH_MAC_KR_PCS_STAT_AN_LINK_CONTROL_MASK 0x0000003F +#define ETH_MAC_KR_PCS_STAT_AN_LINK_CONTROL_SHIFT 0 +/* Block lock */ +#define ETH_MAC_KR_PCS_STAT_BLOCK_LOCK (1 << 8) +/* hi BER */ +#define ETH_MAC_KR_PCS_STAT_HI_BER (1 << 9) + +#define ETH_MAC_KR_PCS_STAT_RX_WAKE_ERR (1 << 16) + +#define ETH_MAC_KR_PCS_STAT_PMA_TXMODE_ALERT (1 << 17) + +#define ETH_MAC_KR_PCS_STAT_PMA_TXMODE_QUIET (1 << 18) + +#define ETH_MAC_KR_PCS_STAT_PMA_RXMODE_QUIET (1 << 19) + +#define ETH_MAC_KR_PCS_STAT_RX_LPI_ACTIVE (1 << 20) + +#define ETH_MAC_KR_PCS_STAT_TX_LPI_ACTIVE (1 << 21) + +/**** reg_addr register ****/ +/* Address value */ +#define ETH_MAC_SGMII_REG_ADDR_VAL_MASK 0x0000001F +#define ETH_MAC_SGMII_REG_ADDR_VAL_SHIFT 0 + +#define ETH_MAC_SGMII_REG_ADDR_CTRL_REG 0x0 +#define ETH_MAC_SGMII_REG_ADDR_IF_MODE_REG 0x14 + +/**** reg_data register ****/ +/* Data value */ +#define ETH_MAC_SGMII_REG_DATA_VAL_MASK 0x0000FFFF +#define ETH_MAC_SGMII_REG_DATA_VAL_SHIFT 0 + +#define ETH_MAC_SGMII_REG_DATA_CTRL_AN_ENABLE (1 << 12) +#define ETH_MAC_SGMII_REG_DATA_IF_MODE_SGMII_EN (1 << 0) +#define ETH_MAC_SGMII_REG_DATA_IF_MODE_SGMII_AN (1 << 1) +#define ETH_MAC_SGMII_REG_DATA_IF_MODE_SGMII_SPEED_MASK 0xC +#define ETH_MAC_SGMII_REG_DATA_IF_MODE_SGMII_SPEED_SHIFT 2 +#define ETH_MAC_SGMII_REG_DATA_IF_MODE_SGMII_SPEED_10 0x0 +#define ETH_MAC_SGMII_REG_DATA_IF_MODE_SGMII_SPEED_100 0x1 +#define ETH_MAC_SGMII_REG_DATA_IF_MODE_SGMII_SPEED_1000 0x2 +#define ETH_MAC_SGMII_REG_DATA_IF_MODE_SGMII_DUPLEX (1 << 4) + +/**** clk_div register ****/ +/* Value for 1000M selection */ +#define ETH_MAC_SGMII_CLK_DIV_VAL_1000_MASK 0x000000FF +#define ETH_MAC_SGMII_CLK_DIV_VAL_1000_SHIFT 0 +/* Value for 100M selection */ +#define ETH_MAC_SGMII_CLK_DIV_VAL_100_MASK 0x0000FF00 +#define ETH_MAC_SGMII_CLK_DIV_VAL_100_SHIFT 8 +/* Value for 10M selection */ +#define ETH_MAC_SGMII_CLK_DIV_VAL_10_MASK 0x00FF0000 +#define ETH_MAC_SGMII_CLK_DIV_VAL_10_SHIFT 16 +/* Bypass PCS selection */ +#define ETH_MAC_SGMII_CLK_DIV_BYPASS (1 << 24) +/* Divider selection when bypass field is '1', one hot001 – 1000 ... 
*/ +#define ETH_MAC_SGMII_CLK_DIV_SEL_MASK 0x0E000000 +#define ETH_MAC_SGMII_CLK_DIV_SEL_SHIFT 25 + +/**** link_stat register ****/ + +#define ETH_MAC_SGMII_LINK_STAT_SET_1000 (1 << 0) + +#define ETH_MAC_SGMII_LINK_STAT_SET_100 (1 << 1) + +#define ETH_MAC_SGMII_LINK_STAT_SET_10 (1 << 2) + +#define ETH_MAC_SGMII_LINK_STAT_LED_AN (1 << 3) + +#define ETH_MAC_SGMII_LINK_STAT_HD_ENA (1 << 4) + +#define ETH_MAC_SGMII_LINK_STAT_LED_LINK (1 << 5) + +/**** afifo_ctrl register ****/ +/* enable tx input operation */ +#define ETH_MAC_GEN_V3_AFIFO_CTRL_EN_TX_IN (1 << 0) +/* enable tx output operation */ +#define ETH_MAC_GEN_V3_AFIFO_CTRL_EN_TX_OUT (1 << 1) +/* enable rx input operation */ +#define ETH_MAC_GEN_V3_AFIFO_CTRL_EN_RX_IN (1 << 4) +/* enable rx output operation */ +#define ETH_MAC_GEN_V3_AFIFO_CTRL_EN_RX_OUT (1 << 5) +/* enable tx FIFO input operation */ +#define ETH_MAC_GEN_V3_AFIFO_CTRL_EN_TX_FIFO_IN (1 << 8) +/* enable tx FIFO output operation */ +#define ETH_MAC_GEN_V3_AFIFO_CTRL_EN_TX_FIFO_OUT (1 << 9) +/* enable rx FIFO input operation */ +#define ETH_MAC_GEN_V3_AFIFO_CTRL_EN_RX_FIFO_IN (1 << 12) +/* enable rx FIFO output operation */ +#define ETH_MAC_GEN_V3_AFIFO_CTRL_EN_RX_FIFO_OUT (1 << 13) + +/**** tx_afifo_cfg_1 register ****/ +/* minimum packet size configuration */ +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_1_MIN_PKT_SIZE_MASK 0x0000FFFF +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_1_MIN_PKT_SIZE_SHIFT 0 + +/**** tx_afifo_cfg_2 register ****/ +/* maximum packet size configuration */ +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_2_MAX_PKT_SIZE_MASK 0x0000FFFF +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_2_MAX_PKT_SIZE_SHIFT 0 + +/**** tx_afifo_cfg_3 register ****/ +/* input bus width */ +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_3_INPUT_BUS_W_MASK 0x0000FFFF +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_3_INPUT_BUS_W_SHIFT 0 +/* input bus width divide factor */ +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_3_INPUT_BUS_W_F_MASK 0xFFFF0000 +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_3_INPUT_BUS_W_F_SHIFT 16 + +/**** tx_afifo_cfg_4 register ****/ +/* output bus width */ +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_4_OUTPUT_BUS_W_MASK 0x0000FFFF +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_4_OUTPUT_BUS_W_SHIFT 0 +/* output bus width divide factor */ +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_4_OUTPUT_BUS_W_F_MASK 0xFFFF0000 +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_4_OUTPUT_BUS_W_F_SHIFT 16 + +/**** tx_afifo_cfg_5 register ****/ +/* determines if the input bus is valid/read or “write enable” */ +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_5_INPUT_BUS_VALID_RDY (1 << 0) +/* determines if the output bus is valid/read or “write enable” */ +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_5_OUTPUT_BUS_VALID_RDY (1 << 1) +/* Swap input bus bytes */ +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_5_INPUT_BUS_SWAP_BYTES (1 << 4) +/* Swap output bus bytes */ +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_5_OUTPUT_BUS_SWAP_BYTES (1 << 5) +/* output clock select +0 – mac_ll_tx_clk +1 – clk_mac_sys_clk */ +#define ETH_MAC_GEN_V3_TX_AFIFO_CFG_5_OUTPUT_CLK_SEL (1 << 8) + +/**** rx_afifo_cfg_1 register ****/ +/* minimum packet size configuration */ +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_1_MIN_PKT_SIZE_MASK 0x0000FFFF +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_1_MIN_PKT_SIZE_SHIFT 0 + +/**** rx_afifo_cfg_2 register ****/ +/* maximum packet size configuration */ +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_2_MAX_PKT_SIZE_MASK 0x0000FFFF +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_2_MAX_PKT_SIZE_SHIFT 0 + +/**** rx_afifo_cfg_3 register ****/ +/* input bus width */ +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_3_INPUT_BUS_W_MASK 0x0000FFFF +#define 
ETH_MAC_GEN_V3_RX_AFIFO_CFG_3_INPUT_BUS_W_SHIFT 0 +/* input bus width divide factor */ +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_3_INPUT_BUS_W_F_MASK 0xFFFF0000 +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_3_INPUT_BUS_W_F_SHIFT 16 + +/**** rx_afifo_cfg_4 register ****/ +/* output bus width */ +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_4_OUTPUT_BUS_W_MASK 0x0000FFFF +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_4_OUTPUT_BUS_W_SHIFT 0 +/* output bus width divide factor */ +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_4_OUTPUT_BUS_W_F_MASK 0xFFFF0000 +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_4_OUTPUT_BUS_W_F_SHIFT 16 + +/**** rx_afifo_cfg_5 register ****/ +/* determines if the input bus is valid/read or “write enable” */ +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_5_INPUT_BUS_VALID_RDY (1 << 0) +/* determines if the output bus is valid/read or “write enable” */ +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_5_OUTPUT_BUS_VALID_RDY (1 << 1) +/* Swap input bus bytes */ +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_5_INPUT_BUS_SWAP_BYTES (1 << 4) +/* Swap output bus bytes */ +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_5_OUTPUT_BUS_SWAP_BYTES (1 << 5) +/* input clock select0 – mac_ll_rx_clk1 – clk_serdes_int_0_tx_dw ... */ +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_5_INPUT_CLK_SEL_MASK 0x00000300 +#define ETH_MAC_GEN_V3_RX_AFIFO_CFG_5_INPUT_CLK_SEL_SHIFT 8 + +/**** mac_sel register ****/ +/* Select the MAC that is connected to the SGMII PCS */ +#define ETH_MAC_GEN_V3_MAC_SEL_MAC_10G_SGMII (1 << 0) +/* Select between the 10G and 40G MAC +0 – 10G MAC +1 – 40G MAC */ +#define ETH_MAC_GEN_V3_MAC_SEL_MAC_10G_40G (1 << 4) + +/**** mac_10g_ll_cfg register ****/ +/* select between 10G (KR PCS) and 1G (SGMII) mode */ +#define ETH_MAC_GEN_V3_MAC_10G_LL_CFG_MODE_1G (1 << 0) +/* enable Magic packet detection in the MAC (all other packets a ... */ +#define ETH_MAC_GEN_V3_MAC_10G_LL_CFG_MAGIC_ENA (1 << 5) + +/**** mac_10g_ll_ctrl register ****/ +/* Force the MAC to stop TX transmission after low power mode. 
*/ +#define ETH_MAC_GEN_V3_MAC_10G_LL_CTRL_LPI_TXHOLD (1 << 0) + +/**** pcs_10g_ll_cfg register ****/ +/* RX FEC Enable */ +#define ETH_MAC_GEN_V3_PCS_10G_LL_CFG_FEC_EN_RX (1 << 0) +/* TX FEC enable */ +#define ETH_MAC_GEN_V3_PCS_10G_LL_CFG_FEC_EN_TX (1 << 1) +/* RX FEC error propagation enable, +(debug, always 0) */ +#define ETH_MAC_GEN_V3_PCS_10G_LL_CFG_FEC_ERR_ENA (1 << 2) +/* Gearbox configuration: +00 -16 +01 – 20 +10 – 32 +11 – reserved */ +#define ETH_MAC_GEN_V3_PCS_10G_LL_CFG_TX_GB_CFG_MASK 0x00000030 +#define ETH_MAC_GEN_V3_PCS_10G_LL_CFG_TX_GB_CFG_SHIFT 4 +/* Gearbox configuration: +00 -16 +01 – 20 +10 – 32 +11 – reserved */ +#define ETH_MAC_GEN_V3_PCS_10G_LL_CFG_RX_GB_CFG_MASK 0x000000C0 +#define ETH_MAC_GEN_V3_PCS_10G_LL_CFG_RX_GB_CFG_SHIFT 6 + +/**** pcs_10g_ll_status register ****/ +/* FEC locked indication */ +#define ETH_MAC_GEN_V3_PCS_10G_LL_STATUS_FEC_LOCKED (1 << 0) +/**** pcs_40g_ll_cfg register ****/ +/* RX FEC Enable */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_FEC_EN_RX_MASK 0x0000000F +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_FEC_EN_RX_SHIFT 0 +/* TX FEC enable */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_FEC_EN_TX_MASK 0x000000F0 +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_FEC_EN_TX_SHIFT 4 +/* RX FEC error propagation enable, (debug, always 0) */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_FEC_ERR_EN_MASK 0x00000F00 +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_FEC_ERR_EN_SHIFT 8 +/* SERDES width, 16 bit enable +1 – 16 +2 – 32 */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_SD_16B (1 << 12) +/* FEC 91 enable */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_FEC91_ENA (1 << 13) +/* PHY LOS indication selection 00 - Select register value from ... */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_PHY_LOS_SEL_MASK 0x00030000 +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_PHY_LOS_SEL_SHIFT 16 +/* PHY LOS default value */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_PHY_LOS_DEF (1 << 18) +/* PHY LOS polarity */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_PHY_LOS_POL (1 << 19) +/* Energy detect indication selection 00 - Select register valu ... 
*/ +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_ENERGY_DETECT_SEL_MASK 0x00300000 +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_ENERGY_DETECT_SEL_SHIFT 20 +/* Energy detect default value */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_ENERGY_DETECT_DEF (1 << 22) +/* Energy detect polarity */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_CFG_ENERGY_DETECT_POL (1 << 23) + +/**** pcs_40g_ll_status register ****/ +/* Block lock */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_STATUS_BLOCK_LOCK_MASK 0x0000000F +#define ETH_MAC_GEN_V3_PCS_40G_LL_STATUS_BLOCK_LOCK_SHIFT 0 +/* align done */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_STATUS_ALIGN_DONE (1 << 4) +/* high BER */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_STATUS_HIGH_BER (1 << 8) +/* FEC locked indication */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_STATUS_FEC_LOCKED_MASK 0x0000F000 +#define ETH_MAC_GEN_V3_PCS_40G_LL_STATUS_FEC_LOCKED_SHIFT 12 + +/**** pcs_40g_ll_addr register ****/ +/* Address value */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_ADDR_VAL_MASK 0x0001FFFF +#define ETH_MAC_GEN_V3_PCS_40G_LL_ADDR_VAL_SHIFT 0 + +/**** pcs_40g_ll_data register ****/ +/* Data value */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_DATA_VAL_MASK 0x0000FFFF +#define ETH_MAC_GEN_V3_PCS_40G_LL_DATA_VAL_SHIFT 0 + +/**** mac_40g_ll_cfg register ****/ +/* change TX CRC polarity */ +#define ETH_MAC_GEN_V3_MAC_40G_LL_CFG_SWAP_FF_TX_CRC (1 << 0) +/* force TX remote fault */ +#define ETH_MAC_GEN_V3_MAC_40G_LL_CFG_TX_REM_FAULT (1 << 4) +/* force TX local fault */ +#define ETH_MAC_GEN_V3_MAC_40G_LL_CFG_TX_LOC_FAULT (1 << 5) +/* force TX Link fault */ +#define ETH_MAC_GEN_V3_MAC_40G_LL_CFG_TX_LI_FAULT (1 << 6) +/* PHY LOS indication selection 00 - Select register value from ... */ +#define ETH_MAC_GEN_V3_MAC_40G_LL_CFG_PHY_LOS_SEL_MASK 0x00000300 +#define ETH_MAC_GEN_V3_MAC_40G_LL_CFG_PHY_LOS_SEL_SHIFT 8 +/* PHY LOS default value */ +#define ETH_MAC_GEN_V3_MAC_40G_LL_CFG_PHY_LOS_DEF (1 << 10) +/* PHY LOS polarity */ +#define ETH_MAC_GEN_V3_MAC_40G_LL_CFG_PHY_LOS_POL (1 << 11) + +/**** mac_40g_ll_status register ****/ +/* pause on indication */ +#define ETH_MAC_GEN_V3_MAC_40G_LL_STATUS_PAUSE_ON_MASK 0x000000FF +#define ETH_MAC_GEN_V3_MAC_40G_LL_STATUS_PAUSE_ON_SHIFT 0 +/* local fault indication received */ +#define ETH_MAC_GEN_V3_MAC_40G_LL_STATUS_LOC_FAULT (1 << 8) +/* remote fault indication received */ +#define ETH_MAC_GEN_V3_MAC_40G_LL_STATUS_REM_FAULT (1 << 9) +/* Link fault indication */ +#define ETH_MAC_GEN_V3_MAC_40G_LL_STATUS_LI_FAULT (1 << 10) + +/**** preamble_cfg_high register ****/ +/* preamble value */ +#define ETH_MAC_GEN_V3_PREAMBLE_CFG_HIGH_VAL_MASK 0x00FFFFFF +#define ETH_MAC_GEN_V3_PREAMBLE_CFG_HIGH_VAL_SHIFT 0 + +/**** mac_40g_ll_addr register ****/ +/* Address value */ +#define ETH_MAC_GEN_V3_MAC_40G_LL_ADDR_VAL_MASK 0x000003FF +#define ETH_MAC_GEN_V3_MAC_40G_LL_ADDR_VAL_SHIFT 0 + +/**** mac_40g_ll_ctrl register ****/ +/* Force the MAC to stop TX transmission after low power mode. 
*/ +#define ETH_MAC_GEN_V3_MAC_40G_LL_CTRL_LPI_TXHOLD (1 << 0) + +#define ETH_MAC_GEN_V3_MAC_40G_LL_CTRL_REG_LOWP_ENA (1 << 1) + +/**** pcs_40g_fec_91_ll_addr register ****/ +/* Address value */ +#define ETH_MAC_GEN_V3_PCS_40G_FEC_91_LL_ADDR_VAL_MASK 0x000001FF +#define ETH_MAC_GEN_V3_PCS_40G_FEC_91_LL_ADDR_VAL_SHIFT 0 + +/**** pcs_40g_fec_91_ll_data register ****/ +/* Data value */ +#define ETH_MAC_GEN_V3_PCS_40G_FEC_91_LL_DATA_VAL_MASK 0x0000FFFF +#define ETH_MAC_GEN_V3_PCS_40G_FEC_91_LL_DATA_VAL_SHIFT 0 + +/**** pcs_40g_ll_eee_cfg register ****/ +/* Low power timer configuration */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_EEE_CFG_TIMER_VAL_MASK 0x000000FF +#define ETH_MAC_GEN_V3_PCS_40G_LL_EEE_CFG_TIMER_VAL_SHIFT 0 +/* Low power Fast wake */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_EEE_CFG_LPI_FW (1 << 8) + +/**** pcs_40g_ll_eee_status register ****/ +/* TX LPI mode */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_EEE_STATUS_TX_LPI_MODE_MASK 0x00000003 +#define ETH_MAC_GEN_V3_PCS_40G_LL_EEE_STATUS_TX_LPI_MODE_SHIFT 0 +/* TX LPI state */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_EEE_STATUS_TX_LPI_STATE_MASK 0x00000070 +#define ETH_MAC_GEN_V3_PCS_40G_LL_EEE_STATUS_TX_LPI_STATE_SHIFT 4 +/* TX LPI mode */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_EEE_STATUS_RX_LPI_MODE (1 << 8) +/* TX LPI state */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_EEE_STATUS_RX_LPI_STATE_MASK 0x00007000 +#define ETH_MAC_GEN_V3_PCS_40G_LL_EEE_STATUS_RX_LPI_STATE_SHIFT 12 +/* TX LPI active */ +#define ETH_MAC_GEN_V3_PCS_40G_LL_EEE_STATUS_RX_LPI_ACTIVE (1 << 15) + +/**** serdes_32_tx_shift register ****/ +/* bit shift */ +#define ETH_MAC_GEN_V3_SERDES_32_TX_SHIFT_SERDES_0_MASK 0x0000001F +#define ETH_MAC_GEN_V3_SERDES_32_TX_SHIFT_SERDES_0_SHIFT 0 +/* bit shift */ +#define ETH_MAC_GEN_V3_SERDES_32_TX_SHIFT_SERDES_1_MASK 0x000003E0 +#define ETH_MAC_GEN_V3_SERDES_32_TX_SHIFT_SERDES_1_SHIFT 5 +/* bit shift */ +#define ETH_MAC_GEN_V3_SERDES_32_TX_SHIFT_SERDES_2_MASK 0x00007C00 +#define ETH_MAC_GEN_V3_SERDES_32_TX_SHIFT_SERDES_2_SHIFT 10 +/* bit shift */ +#define ETH_MAC_GEN_V3_SERDES_32_TX_SHIFT_SERDES_3_MASK 0x000F8000 +#define ETH_MAC_GEN_V3_SERDES_32_TX_SHIFT_SERDES_3_SHIFT 15 + +/**** serdes_32_rx_shift register ****/ +/* bit shift */ +#define ETH_MAC_GEN_V3_SERDES_32_RX_SHIFT_SERDES_0_MASK 0x0000001F +#define ETH_MAC_GEN_V3_SERDES_32_RX_SHIFT_SERDES_0_SHIFT 0 +/* bit shift */ +#define ETH_MAC_GEN_V3_SERDES_32_RX_SHIFT_SERDES_1_MASK 0x000003E0 +#define ETH_MAC_GEN_V3_SERDES_32_RX_SHIFT_SERDES_1_SHIFT 5 +/* bit shift */ +#define ETH_MAC_GEN_V3_SERDES_32_RX_SHIFT_SERDES_2_MASK 0x00007C00 +#define ETH_MAC_GEN_V3_SERDES_32_RX_SHIFT_SERDES_2_SHIFT 10 +/* bit shift */ +#define ETH_MAC_GEN_V3_SERDES_32_RX_SHIFT_SERDES_3_MASK 0x000F8000 +#define ETH_MAC_GEN_V3_SERDES_32_RX_SHIFT_SERDES_3_SHIFT 15 + +/**** serdes_32_tx_sel register ****/ +/* 0 – directly from serdes1 – swapped2 – swapped with shift3 - ... */ +#define ETH_MAC_GEN_V3_SERDES_32_TX_SEL_SERDES_0_MASK 0x00000003 +#define ETH_MAC_GEN_V3_SERDES_32_TX_SEL_SERDES_0_SHIFT 0 +/* 0 – directly from serdes1 – swapped2 – swapped with shift3 - ... */ +#define ETH_MAC_GEN_V3_SERDES_32_TX_SEL_SERDES_1_MASK 0x00000030 +#define ETH_MAC_GEN_V3_SERDES_32_TX_SEL_SERDES_1_SHIFT 4 +/* 0 – directly from serdes1 – swapped2 – swapped with shift3 - ... */ +#define ETH_MAC_GEN_V3_SERDES_32_TX_SEL_SERDES_2_MASK 0x00000300 +#define ETH_MAC_GEN_V3_SERDES_32_TX_SEL_SERDES_2_SHIFT 8 +/* 0 – directly from serdes1 – swapped2 – swapped with shift3 - ... 
*/ +#define ETH_MAC_GEN_V3_SERDES_32_TX_SEL_SERDES_3_MASK 0x00003000 +#define ETH_MAC_GEN_V3_SERDES_32_TX_SEL_SERDES_3_SHIFT 12 + +/**** serdes_32_rx_sel register ****/ +/* 0 – directly from serdes1 – swapped2 – swapped with shift3 - ... */ +#define ETH_MAC_GEN_V3_SERDES_32_RX_SEL_SERDES_0_MASK 0x00000003 +#define ETH_MAC_GEN_V3_SERDES_32_RX_SEL_SERDES_0_SHIFT 0 +/* 0 – directly from serdes1 – swapped2 – swapped with shift3 - ... */ +#define ETH_MAC_GEN_V3_SERDES_32_RX_SEL_SERDES_1_MASK 0x00000030 +#define ETH_MAC_GEN_V3_SERDES_32_RX_SEL_SERDES_1_SHIFT 4 +/* 0 – directly from serdes1 – swapped2 – swapped with shift3 - ... */ +#define ETH_MAC_GEN_V3_SERDES_32_RX_SEL_SERDES_2_MASK 0x00000300 +#define ETH_MAC_GEN_V3_SERDES_32_RX_SEL_SERDES_2_SHIFT 8 +/* 0 – directly from serdes1 – swapped2 – swapped with shift3 - ... */ +#define ETH_MAC_GEN_V3_SERDES_32_RX_SEL_SERDES_3_MASK 0x00003000 +#define ETH_MAC_GEN_V3_SERDES_32_RX_SEL_SERDES_3_SHIFT 12 + +/**** an_lt_ctrl register ****/ +/* reset lane [3:0] */ +#define ETH_MAC_GEN_V3_AN_LT_CTRL_SW_RESET_MASK 0x0000000F +#define ETH_MAC_GEN_V3_AN_LT_CTRL_SW_RESET_SHIFT 0 + +/**** an_lt_0_addr register ****/ +/* Address value */ +#define ETH_MAC_GEN_V3_AN_LT_0_ADDR_VAL_MASK 0x0000FFFF +#define ETH_MAC_GEN_V3_AN_LT_0_ADDR_VAL_SHIFT 0 + +/**** an_lt_1_addr register ****/ +/* Address value */ +#define ETH_MAC_GEN_V3_AN_LT_1_ADDR_VAL_MASK 0x0000FFFF +#define ETH_MAC_GEN_V3_AN_LT_1_ADDR_VAL_SHIFT 0 + +/**** an_lt_2_addr register ****/ +/* Address value */ +#define ETH_MAC_GEN_V3_AN_LT_2_ADDR_VAL_MASK 0x0000FFFF +#define ETH_MAC_GEN_V3_AN_LT_2_ADDR_VAL_SHIFT 0 + +/**** an_lt_3_addr register ****/ +/* Address value */ +#define ETH_MAC_GEN_V3_AN_LT_3_ADDR_VAL_MASK 0x0000FFFF +#define ETH_MAC_GEN_V3_AN_LT_3_ADDR_VAL_SHIFT 0 + +/**** ext_serdes_ctrl register ****/ +/* Lane 0, SERDES selection:01 – 10G SERDES, lane 010 – 25G SERD ... */ +#define ETH_MAC_GEN_V3_EXT_SERDES_CTRL_LANE_0_SEL_25_10_MASK 0x00000003 +#define ETH_MAC_GEN_V3_EXT_SERDES_CTRL_LANE_0_SEL_25_10_SHIFT 0 +/* Lane 1, SERDES selection:01 – 10G SERDES, lane 110 – 25G SERD ... */ +#define ETH_MAC_GEN_V3_EXT_SERDES_CTRL_LANE_1_SEL_25_10_MASK 0x0000000C +#define ETH_MAC_GEN_V3_EXT_SERDES_CTRL_LANE_1_SEL_25_10_SHIFT 2 +/* Lane 2, SERDES selection:01 – 10G SERDES, lane 210 – 25G SERD ... */ +#define ETH_MAC_GEN_V3_EXT_SERDES_CTRL_LANE_2_SEL_25_10_MASK 0x00000030 +#define ETH_MAC_GEN_V3_EXT_SERDES_CTRL_LANE_2_SEL_25_10_SHIFT 4 +/* Lane 3, SERDES selection:01 – 10G SERDES, lane 310 – 25G SERD ... 
*/ +#define ETH_MAC_GEN_V3_EXT_SERDES_CTRL_LANE_3_SEL_25_10_MASK 0x000000C0 +#define ETH_MAC_GEN_V3_EXT_SERDES_CTRL_LANE_3_SEL_25_10_SHIFT 6 + +/*** MAC Core registers addresses ***/ +/* command config */ +#define ETH_MAC_GEN_V3_MAC_40G_COMMAND_CONFIG_ADDR 0x00000008 +#define ETH_MAC_GEN_V3_MAC_40G_COMMAND_CONFIG_TX_ENA (1 << 0) +#define ETH_MAC_GEN_V3_MAC_40G_COMMAND_CONFIG_RX_ENA (1 << 1) +#define ETH_MAC_GEN_V3_MAC_40G_COMMAND_CONFIG_PFC_MODE (1 << 19) + +/* frame length */ +#define ETH_MAC_GEN_V3_MAC_40G_FRM_LENGTH_ADDR 0x00000014 + +#define ETH_MAC_GEN_V3_MAC_40G_CL01_PAUSE_QUANTA_ADDR 0x00000054 +#define ETH_MAC_GEN_V3_MAC_40G_CL23_PAUSE_QUANTA_ADDR 0x00000058 +#define ETH_MAC_GEN_V3_MAC_40G_CL45_PAUSE_QUANTA_ADDR 0x0000005C +#define ETH_MAC_GEN_V3_MAC_40G_CL67_PAUSE_QUANTA_ADDR 0x00000060 +#define ETH_MAC_GEN_V3_MAC_40G_CL01_QUANTA_THRESH_ADDR 0x00000064 +#define ETH_MAC_GEN_V3_MAC_40G_CL23_QUANTA_THRESH_ADDR 0x00000068 +#define ETH_MAC_GEN_V3_MAC_40G_CL45_QUANTA_THRESH_ADDR 0x0000006C +#define ETH_MAC_GEN_V3_MAC_40G_CL67_QUANTA_THRESH_ADDR 0x00000070 + +/*** PCS Core registers addresses ***/ +/* 40g control/status */ +#define ETH_MAC_GEN_V3_PCS_40G_CONTROL_STATUS_ADDR 0x00000000 +/* 10g control_1 */ +#define ETH_MAC_KR_PCS_CONTROL_1_ADDR 0x00000000 + +#ifdef __cplusplus +} +#endif + +#endif /* __AL_HAL_ETH_MAC_REG_H */ + +/** @} end of Ethernet group */ diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_macsec.c b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_macsec.c new file mode 100644 index 00000000000000..e1e657a8760206 --- /dev/null +++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_macsec.c @@ -0,0 +1,441 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +/** + * @{ + * @file al_hal_eth_macsec.c + * + * @brief XG Ethernet unit HAL driver for macsec support (encryption/authentication) + * + * + */ + + +#include "al_hal_eth.h" +#include "al_hal_eth_macsec_regs.h" +#include "al_hal_eth_ec_regs.h" + + + +/* MacSec Constants */ +#define AL_ETH_MACSEC_REGS_BASE_OFFSET_FROM_EC 0x3000 +#define AL_ETH_MACSEC_SAD_DEPTH 128 +#define AL_ETH_MACSEC_SC_MAP_DEPTH 64 +#define AL_ETH_MACSEC_CIPHER_MODE_GCM 8 +#define AL_ETH_MACSEC_CIPHER_CNTR_SIZE_32 1 +#define AL_ETH_MACSEC_EC_GEN_L2_SIZE_802_3_MS_8 12 + + +/* MacSec SAD parameters */ +#define AL_ETH_MACSEC_SA_KEY_LSW 0 +#define AL_ETH_MACSEC_SA_KEY_MSW 7 +#define AL_ETH_MACSEC_SA_IV_LSW 8 +#define AL_ETH_MACSEC_SA_IV_MSW 11 +#define AL_ETH_MACSEC_SA_SCI_LSW 12 +#define AL_ETH_MACSEC_SA_SCI_MSW 13 +#define AL_ETH_MACSEC_SA_PN_MSW 14 +#define AL_ETH_MACSEC_SA_PARAMS_MSW 15 + +#define AL_ETH_MACSEC_SA_PARAMS_ENTRY_VALID_SHIFT 31 +#define AL_ETH_MACSEC_SA_PARAMS_KEY_SIZE_MSB_SHIFT 30 +#define AL_ETH_MACSEC_SA_PARAMS_KEY_SIZE_LSB_SHIFT 29 +#define AL_ETH_MACSEC_SA_PARAMS_KEY_SIZE_MASK AL_FIELD_MASK(AL_ETH_MACSEC_SA_PARAMS_KEY_SIZE_MSB_SHIFT, \ + AL_ETH_MACSEC_SA_PARAMS_KEY_SIZE_LSB_SHIFT) +#define AL_ETH_MACSEC_SA_PARAMS_SIGN_SIZE_MSB_SHIFT 28 +#define AL_ETH_MACSEC_SA_PARAMS_SIGN_SIZE_LSB_SHIFT 25 +#define AL_ETH_MACSEC_SA_PARAMS_SIGN_SIZE_MASK AL_FIELD_MASK(AL_ETH_MACSEC_SA_PARAMS_SIGN_SIZE_MSB_SHIFT, \ + AL_ETH_MACSEC_SA_PARAMS_SIGN_SIZE_LSB_SHIFT) +#define AL_ETH_MACSEC_SA_PARAMS_SIGN_SIZE_MAX_ALLOWED 16 +#define AL_ETH_MACSEC_SA_PARAMS_END_STATION_SHIFT 23 /* current node is an End-Station */ +#define AL_ETH_MACSEC_SA_PARAMS_SECTAG_SCI_SHIFT 22 /* if asserted, SCI field is to be encoded in the Sectag header + --> Sectag header is 16 Bytes long + (if deasserted, Sectag header length is 8 Bytes long) + */ +#define AL_ETH_MACSEC_SA_PARAMS_REPLAY_EN_SHIFT 20 /* Engine will perform replay-check on every RX pkt */ +#define AL_ETH_MACSEC_SA_PARAMS_ENCAPSULATION_EN_SHIFT 17 /* Add/Remove MacSec headers on TX/RX pkts */ +#define AL_ETH_MACSEC_SA_PARAMS_SECTAG_OFFSET_MSB_SHIFT 15 +#define AL_ETH_MACSEC_SA_PARAMS_SECTAG_OFFSET_LSB_SHIFT 0 +#define AL_ETH_MACSEC_SA_PARAMS_SECTAG_OFFSET_MASK AL_FIELD_MASK(AL_ETH_MACSEC_SA_PARAMS_SECTAG_OFFSET_MSB_SHIFT, \ + AL_ETH_MACSEC_SA_PARAMS_SECTAG_OFFSET_LSB_SHIFT) + +/* MacSec SA-CAM parameters */ +#define AL_ETH_MACSEC_SC_MAP_SCI_LSW 0 +#define AL_ETH_MACSEC_SC_MAP_SCI_MSW 1 +#define AL_ETH_MACSEC_SC_MAP_SCI_MASK_LSW 2 +#define AL_ETH_MACSEC_SC_MAP_SCI_MASK_MSW 3 +#define AL_ETH_MACSEC_SC_MAP_SC_INDEX_MSW 4 + + +/* internal function for performing write of a prepared SA entry to TX & RX SADs. + * This function handles relevant write to control register. + */ +static INLINE void al_eth_macsec_sad_entry_perform_write (struct al_macsec_regs *macsec_regs_base, uint8_t index) +{ + al_reg_write32(&macsec_regs_base->cache_access_tx_sad_control.tx_sad_control, index | MACSEC_CACHE_ACCESS_TX_SAD_CONTROL_TX_SAD_CONTROL_PERFORM_WRITE); + al_reg_write32(&macsec_regs_base->cache_access_rx_sad_control.rx_sad_control, index | MACSEC_CACHE_ACCESS_RX_SAD_CONTROL_RX_SAD_CONTROL_PERFORM_WRITE); +} + +/* internal function for performing read of a SA entry from TX & RX SADs. + * This function handles relevant write to control register. 
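+ * (added note: writing the index together with the PERFORM_READ control bit
+ * latches the selected SAD line into the data registers, which are then read
+ * back via al_eth_macsec_sa_field_read(); see the *_SAD_CONTROL_PERFORM_*
+ * fields in al_hal_eth_macsec_regs.h)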
+ */ +static INLINE void al_eth_macsec_sad_entry_perform_read (struct al_macsec_regs *macsec_regs_base, uint8_t index) +{ + al_reg_write32(&macsec_regs_base->cache_access_tx_sad_control.tx_sad_control, index | MACSEC_CACHE_ACCESS_TX_SAD_CONTROL_TX_SAD_CONTROL_PERFORM_READ); + al_reg_write32(&macsec_regs_base->cache_access_rx_sad_control.rx_sad_control, index | MACSEC_CACHE_ACCESS_RX_SAD_CONTROL_RX_SAD_CONTROL_PERFORM_READ); +} + +/* internal function for performing read of an SA *FIELD* from both TX & RX SADs. + * if tx_field or rx_field are NULL, the relevant read will not take place + */ +static INLINE void al_eth_macsec_sa_field_read (struct al_macsec_regs *macsec_regs_base, + uint8_t offset, uint32_t *tx_field, uint32_t *rx_field) +{ + if (tx_field != NULL) + *tx_field = al_reg_read32(&macsec_regs_base->cache_access_tx_sad_data[offset].tx_sad_data); + if (rx_field != NULL) + *rx_field = al_reg_read32(&macsec_regs_base->cache_access_rx_sad_data[offset].rx_sad_data); +} + +/* internal function for performing write of an SA *FIELD* to both TX & RX SADs */ +static INLINE void al_eth_macsec_sa_field_write (struct al_macsec_regs *macsec_regs_base, + uint8_t offset, uint32_t tx_field, uint32_t rx_field) +{ + al_reg_write32(&macsec_regs_base->cache_access_tx_sad_data[offset].tx_sad_data, tx_field); + al_reg_write32(&macsec_regs_base->cache_access_rx_sad_data[offset].rx_sad_data, rx_field); +} + +/* internal function extracting macsec regfile base address from adapter struct */ +static INLINE struct al_macsec_regs *al_eth_macsec_get_regfile_base (struct al_hal_eth_adapter *adapter) +{ + return (struct al_macsec_regs *)((void __iomem*)(adapter->ec_regs_base) + AL_ETH_MACSEC_REGS_BASE_OFFSET_FROM_EC); +} + + +void al_eth_macsec_init ( struct al_hal_eth_adapter *adapter, + al_bool sad_access_mode, + uint8_t num_an_per_sc, + uint32_t pn_threshold, + al_bool enable + ) +{ + uint32_t conf = 0; + struct al_macsec_regs *macsec_regs_base = al_eth_macsec_get_regfile_base(adapter); + + /* configuring size of 802_3_macsec_8 header */ + conf = al_reg_read32(&adapter->ec_regs_base->gen.l2); + AL_REG_FIELD_SET(conf, EC_GEN_L2_SIZE_802_3_MS_8_MASK, + EC_GEN_L2_SIZE_802_3_MS_8_SHIFT, + AL_ETH_MACSEC_EC_GEN_L2_SIZE_802_3_MS_8); + al_reg_write32(&adapter->ec_regs_base->gen.l2, conf); + + /* allowing macsec to modify completion descriptor */ + conf = al_reg_read32(&adapter->ec_regs_base->rfw.out_cfg); + conf |= EC_RFW_OUT_CFG_EN_MACSEC_DEC; + al_reg_write32(&adapter->ec_regs_base->rfw.out_cfg, conf); + + /* macsec crypto engine initialization */ + al_dbg("eth [%s]: enable macsec crypto engine\n", adapter->name); + conf = 0; + AL_REG_FIELD_SET(conf, MACSEC_CONFIGURATION_CONF_MACSEC_TX_CRYPTO_ENGINE_AES_MODE_MASK, + MACSEC_CONFIGURATION_CONF_MACSEC_TX_CRYPTO_ENGINE_AES_MODE_SHIFT, + AL_ETH_MACSEC_CIPHER_MODE_GCM); + AL_REG_FIELD_SET(conf, MACSEC_CONFIGURATION_CONF_MACSEC_TX_CRYPTO_ENGINE_AES_CNTR_STEP_MASK, + MACSEC_CONFIGURATION_CONF_MACSEC_TX_CRYPTO_ENGINE_AES_CNTR_STEP_SHIFT, + AL_ETH_MACSEC_CIPHER_CNTR_SIZE_32); + + /* sad_access_mode */ + if (sad_access_mode) + conf |= MACSEC_CONFIGURATION_CONF_MACSEC_TX_DIRECT_ACCESS; + + /* num_an_per_sc. 
+ * according to 802.1AE, the legal values for num_an_per_sc are 2,4 */ + al_assert((num_an_per_sc == 2) || (num_an_per_sc == 4)); + if (num_an_per_sc == 4) { + conf |= MACSEC_CONFIGURATION_CONF_MACSEC_TX_NUM_SA_PER_SC; + } + + if (enable) + conf |= MACSEC_CONFIGURATION_CONF_MACSEC_TX_ENABLE_ENGINE; + + /* enabling tunnel-mode (macsec/GRE) */ + conf |= MACSEC_CONFIGURATION_CONF_MACSEC_RX_TUNNEL_ENABLE; + + /* Initialize the TX engine */ + al_reg_write32(&macsec_regs_base->configuration.conf_macsec_tx_pn_thr, pn_threshold); + al_reg_write32(&macsec_regs_base->configuration.conf_macsec_tx, conf); + + /* Initialize the RX engine (same fields as in TX configuration) */ + al_reg_write32(&macsec_regs_base->configuration.conf_macsec_rx_pn_thr, pn_threshold); + al_reg_write32(&macsec_regs_base->configuration.conf_macsec_rx, conf); +} + + +void al_eth_macsec_sad_entry_write ( struct al_hal_eth_adapter *adapter, + struct al_eth_macsec_sa *sa, + uint8_t index) +{ + int i; + uint32_t params; + struct al_macsec_regs *macsec_regs_base = al_eth_macsec_get_regfile_base(adapter); + + /* verifying that index is legal */ + al_assert (index < AL_ETH_MACSEC_SAD_DEPTH); + + /* set key */ + for (i = AL_ETH_MACSEC_SA_KEY_LSW ; i <= AL_ETH_MACSEC_SA_KEY_MSW ; i++) { + al_eth_macsec_sa_field_write(macsec_regs_base, i, + sa->key[AL_ETH_MACSEC_SA_KEY_MSW-i], + sa->key[AL_ETH_MACSEC_SA_KEY_MSW-i]); + } + + /* set IV (Initialization Vector) */ + for (i = AL_ETH_MACSEC_SA_IV_LSW ; i <= AL_ETH_MACSEC_SA_IV_MSW ; i++) { + al_eth_macsec_sa_field_write(macsec_regs_base, i, + sa->iv[AL_ETH_MACSEC_SA_IV_MSW-i], + sa->iv[AL_ETH_MACSEC_SA_IV_MSW-i]); + } + + /* set SCI (Secure Channel Identifier) */ + for (i = AL_ETH_MACSEC_SA_SCI_LSW ; i <= AL_ETH_MACSEC_SA_SCI_MSW ; i++) { + al_eth_macsec_sa_field_write(macsec_regs_base, i, + sa->sci[AL_ETH_MACSEC_SA_SCI_MSW-i], + sa->sci[AL_ETH_MACSEC_SA_SCI_MSW-i]); + } + + /* set PN (Packet Number) */ + al_eth_macsec_sa_field_write(macsec_regs_base, AL_ETH_MACSEC_SA_PN_MSW, sa->pn_tx, sa->pn_rx); + + /* adjusting signature_size. 
+ * value 0 means signature size of AL_ETH_MACSEC_SA_PARAMS_SIGN_SIZE_MAX_ALLOWED Bytes
+ */
+ al_assert(sa->signature_size <= AL_ETH_MACSEC_SA_PARAMS_SIGN_SIZE_MAX_ALLOWED);
+ if (sa->signature_size == AL_ETH_MACSEC_SA_PARAMS_SIGN_SIZE_MAX_ALLOWED)
+ sa->signature_size = 0;
+
+ /* set macsec Params */
+ params = (sa->sectag_offset << AL_ETH_MACSEC_SA_PARAMS_SECTAG_OFFSET_LSB_SHIFT ) |
+ (sa->sectag_tci_encode_sci << AL_ETH_MACSEC_SA_PARAMS_SECTAG_SCI_SHIFT ) |
+ (sa->sectag_tci_end_station << AL_ETH_MACSEC_SA_PARAMS_END_STATION_SHIFT ) |
+ (sa->macsec_encapsulation_en << AL_ETH_MACSEC_SA_PARAMS_ENCAPSULATION_EN_SHIFT ) |
+ (sa->replay_offload_en << AL_ETH_MACSEC_SA_PARAMS_REPLAY_EN_SHIFT ) |
+ (sa->signature_size << AL_ETH_MACSEC_SA_PARAMS_SIGN_SIZE_LSB_SHIFT ) |
+ (sa->key_size << AL_ETH_MACSEC_SA_PARAMS_KEY_SIZE_LSB_SHIFT ) |
+ (sa->valid << AL_ETH_MACSEC_SA_PARAMS_ENTRY_VALID_SHIFT );
+
+ al_eth_macsec_sa_field_write(macsec_regs_base, AL_ETH_MACSEC_SA_PARAMS_MSW, params, params);
+
+ /* Perform write to SAD */
+ al_eth_macsec_sad_entry_perform_write(macsec_regs_base, index);
+}
+
+
+void al_eth_macsec_sad_entry_read ( struct al_hal_eth_adapter *adapter,
+ struct al_eth_macsec_sa *sa,
+ uint8_t index)
+{
+ int i;
+ uint32_t params;
+ struct al_macsec_regs *macsec_regs_base = al_eth_macsec_get_regfile_base(adapter);
+
+ /* verifying that index is legal */
+ al_assert (index < AL_ETH_MACSEC_SAD_DEPTH);
+
+ /* Perform read from SAD */
+ al_eth_macsec_sad_entry_perform_read(macsec_regs_base, index);
+
+ /* get key (enough to read from TX SAD only, since TX & RX SADs are guaranteed by design to have equal keys) */
+ for (i = AL_ETH_MACSEC_SA_KEY_LSW ; i <= AL_ETH_MACSEC_SA_KEY_MSW ; i++)
+ al_eth_macsec_sa_field_read(macsec_regs_base, i, &sa->key[AL_ETH_MACSEC_SA_KEY_MSW-i], NULL);
+
+ /* get IV (Initialization Vector) */
+ for (i = AL_ETH_MACSEC_SA_IV_LSW ; i <= AL_ETH_MACSEC_SA_IV_MSW ; i++)
+ al_eth_macsec_sa_field_read(macsec_regs_base, i, &sa->iv[AL_ETH_MACSEC_SA_IV_MSW-i], NULL);
+
+ /* get SCI (Secure Channel Identifier) */
+ for (i = AL_ETH_MACSEC_SA_SCI_LSW ; i <= AL_ETH_MACSEC_SA_SCI_MSW ; i++)
+ al_eth_macsec_sa_field_read(macsec_regs_base, i, &sa->sci[AL_ETH_MACSEC_SA_SCI_MSW-i], NULL);
+
+ /* get PN (Packet Number) */
+ al_eth_macsec_sa_field_read(macsec_regs_base, AL_ETH_MACSEC_SA_PN_MSW, &sa->pn_tx, &sa->pn_rx);
+
+ /* get macsec Params */
+ al_eth_macsec_sa_field_read(macsec_regs_base, AL_ETH_MACSEC_SA_PARAMS_MSW, &params, NULL);
+
+ /* parsing params: */
+ sa->sectag_offset = AL_REG_FIELD_GET(params,
+ AL_ETH_MACSEC_SA_PARAMS_SECTAG_OFFSET_MASK,
+ AL_ETH_MACSEC_SA_PARAMS_SECTAG_OFFSET_LSB_SHIFT);
+ sa->sectag_tci_encode_sci = AL_REG_FIELD_GET(params,
+ AL_BIT(AL_ETH_MACSEC_SA_PARAMS_SECTAG_SCI_SHIFT),
+ AL_ETH_MACSEC_SA_PARAMS_SECTAG_SCI_SHIFT);
+ sa->sectag_tci_end_station = AL_REG_FIELD_GET(params,
+ AL_BIT(AL_ETH_MACSEC_SA_PARAMS_END_STATION_SHIFT),
+ AL_ETH_MACSEC_SA_PARAMS_END_STATION_SHIFT);
+ sa->macsec_encapsulation_en = AL_REG_FIELD_GET(params,
+ AL_BIT(AL_ETH_MACSEC_SA_PARAMS_ENCAPSULATION_EN_SHIFT),
+ AL_ETH_MACSEC_SA_PARAMS_ENCAPSULATION_EN_SHIFT);
+ sa->replay_offload_en = AL_REG_FIELD_GET(params,
+ AL_BIT(AL_ETH_MACSEC_SA_PARAMS_REPLAY_EN_SHIFT),
+ AL_ETH_MACSEC_SA_PARAMS_REPLAY_EN_SHIFT);
+ sa->signature_size = AL_REG_FIELD_GET(params,
+ AL_ETH_MACSEC_SA_PARAMS_SIGN_SIZE_MASK,
+ AL_ETH_MACSEC_SA_PARAMS_SIGN_SIZE_LSB_SHIFT);
+ sa->key_size = AL_REG_FIELD_GET(params,
+ AL_ETH_MACSEC_SA_PARAMS_KEY_SIZE_MASK,
+ AL_ETH_MACSEC_SA_PARAMS_KEY_SIZE_LSB_SHIFT);
+ sa->valid = AL_REG_FIELD_GET(params,
+ AL_BIT(AL_ETH_MACSEC_SA_PARAMS_ENTRY_VALID_SHIFT),
+ AL_ETH_MACSEC_SA_PARAMS_ENTRY_VALID_SHIFT);
+
+ /* adjusting signature_size.
+ * since value 0 means signature size of AL_ETH_MACSEC_SA_PARAMS_SIGN_SIZE_MAX_ALLOWED Bytes
+ */
+ if (sa->signature_size == 0)
+ sa->signature_size = AL_ETH_MACSEC_SA_PARAMS_SIGN_SIZE_MAX_ALLOWED;
+
+}
+
+
+al_bool al_eth_macsec_sad_entry_is_valid (struct al_hal_eth_adapter *adapter, uint8_t index)
+{
+ uint32_t params_tx, params_rx;
+ struct al_macsec_regs *macsec_regs_base = al_eth_macsec_get_regfile_base(adapter);
+
+ /* verifying that index is legal */
+ al_assert (index < AL_ETH_MACSEC_SAD_DEPTH);
+
+ /* Perform read from SAD */
+ al_eth_macsec_sad_entry_perform_read(macsec_regs_base, index);
+
+ /* read SA's params, to check if entry is valid */
+ al_eth_macsec_sa_field_read(macsec_regs_base, AL_ETH_MACSEC_SA_PARAMS_MSW, &params_tx, &params_rx);
+
+ return ((params_tx & AL_BIT(AL_ETH_MACSEC_SA_PARAMS_ENTRY_VALID_SHIFT)) &&
+ (params_rx & AL_BIT(AL_ETH_MACSEC_SA_PARAMS_ENTRY_VALID_SHIFT)) ) ? AL_TRUE : AL_FALSE;
+}
+
+
+void al_eth_macsec_sad_entry_invalidate (struct al_hal_eth_adapter *adapter, uint8_t index)
+{
+ uint32_t params;
+ struct al_macsec_regs *macsec_regs_base = al_eth_macsec_get_regfile_base(adapter);
+
+ /* verifying that index is legal */
+ al_assert (index < AL_ETH_MACSEC_SAD_DEPTH);
+
+ /* invalidate SA entry */
+ params = 0;
+ al_eth_macsec_sa_field_write(macsec_regs_base, AL_ETH_MACSEC_SA_PARAMS_MSW, params, params);
+
+ /* Perform write to SAD */
+ al_eth_macsec_sad_entry_perform_write(macsec_regs_base, index);
+}
+
+void al_eth_macsec_sc_map_entry_write (struct al_hal_eth_adapter *adapter,
+ uint32_t *sci,
+ uint32_t *sci_mask,
+ uint8_t sc_index)
+{
+ int i;
+ struct al_macsec_regs *macsec_regs_base = al_eth_macsec_get_regfile_base(adapter);
+
+ /* verifying that sc index is legal */
+ al_assert (sc_index < AL_ETH_MACSEC_SC_MAP_DEPTH);
+
+ /* set SCI (Secure Channel Identifier) */
+ for (i = AL_ETH_MACSEC_SC_MAP_SCI_LSW ; i <= AL_ETH_MACSEC_SC_MAP_SCI_MSW ; i++) {
+ al_reg_write32(&macsec_regs_base->cam_access_rx_sa_index_build_data[i].sa_index_build_data ,
+ sci[AL_ETH_MACSEC_SC_MAP_SCI_MSW-i]);
+ }
+
+ /* set SCI Mask (Secure Channel Identifier's Mask) */
+ for (i = AL_ETH_MACSEC_SC_MAP_SCI_MASK_LSW ; i <= AL_ETH_MACSEC_SC_MAP_SCI_MASK_MSW ; i++) {
+ al_reg_write32(&macsec_regs_base->cam_access_rx_sa_index_build_data[i].sa_index_build_data ,
+ sci_mask[AL_ETH_MACSEC_SC_MAP_SCI_MASK_MSW-i]);
+ }
+
+ /* set SC Index (Secure Channel Index) */
+ al_reg_write32(&macsec_regs_base->cam_access_rx_sa_index_build_data[AL_ETH_MACSEC_SC_MAP_SC_INDEX_MSW].sa_index_build_data ,
+ sc_index);
+
+ /* perform write */
+ al_reg_write32(&macsec_regs_base->cam_access_rx_sa_index_build_control.sa_index_build_control,
+ sc_index | MACSEC_CAM_ACCESS_RX_SA_INDEX_BUILD_CONTROL_SA_INDEX_BUILD_CONTROL_PERFORM_WRITE);
+}
+
+unsigned int al_eth_macsec_read_exhausted_tx_sa_index (struct al_hal_eth_adapter *adapter)
+{
+ struct al_macsec_regs *macsec_regs_base = al_eth_macsec_get_regfile_base(adapter);
+ uint32_t tx_status;
+ unsigned int exhausted_sa_index;
+
+ tx_status = al_reg_read32(&macsec_regs_base->status.status_macsec_tx);
+ exhausted_sa_index = (unsigned int)((tx_status & MACSEC_STATUS_STATUS_MACSEC_TX_PN_THR_SA_INDEX_MASK)
+ >> MACSEC_STATUS_STATUS_MACSEC_TX_PN_THR_SA_INDEX_SHIFT);
+
+ return exhausted_sa_index;
+}
+
+unsigned int al_eth_macsec_read_exhausted_rx_sa_index (struct al_hal_eth_adapter *adapter)
+{
+ struct
al_macsec_regs *macsec_regs_base = al_eth_macsec_get_regfile_base(adapter); + uint32_t rx_status; + unsigned int exhausted_sa_index; + + rx_status = al_reg_read32(&macsec_regs_base->status.status_macsec_rx); + exhausted_sa_index = (unsigned int)((rx_status & MACSEC_STATUS_STATUS_MACSEC_RX_PN_THR_SA_INDEX_MASK) >> + MACSEC_STATUS_STATUS_MACSEC_RX_PN_THR_SA_INDEX_SHIFT); + + return exhausted_sa_index; +} + +void al_eth_macsec_read_stats_cntr (struct al_hal_eth_adapter *adapter, struct al_eth_macsec_stats_cntr *stats_cntrs) +{ + struct al_macsec_regs *macsec_regs_base = al_eth_macsec_get_regfile_base(adapter); + + al_assert (stats_cntrs != NULL); + + stats_cntrs->tx_pkts = al_reg_read32(&macsec_regs_base->statistics.macsec_tx_pkts); + stats_cntrs->tx_secured_pkts = al_reg_read32(&macsec_regs_base->statistics.macsec_tx_controlled_pkts); + stats_cntrs->tx_unsecured_pkts = al_reg_read32(&macsec_regs_base->statistics.macsec_tx_uncontrolled_pkts); + stats_cntrs->rx_pkts = al_reg_read32(&macsec_regs_base->statistics.macsec_rx_pkts); + stats_cntrs->rx_secured_pkts = al_reg_read32(&macsec_regs_base->statistics.macsec_rx_controlled_pkts); + stats_cntrs->rx_unsecured_pkts = al_reg_read32(&macsec_regs_base->statistics.macsec_rx_uncontrolled_pkts); + stats_cntrs->rx_replay_fail_pkts = al_reg_read32(&macsec_regs_base->statistics.macsec_rx_replay_pkts); + stats_cntrs->rx_auth_fail_pkts = al_reg_read32(&macsec_regs_base->statistics.macsec_rx_auth_fail_pkts); +} + diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_macsec.h b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_macsec.h new file mode 100644 index 00000000000000..51e84503b49944 --- /dev/null +++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_macsec.h @@ -0,0 +1,520 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_eth_macsec_api API
+ * Ethernet Controller MacSec HAL driver API
+ * @ingroup group_eth
+ * @{
+ * @file al_hal_eth_macsec.h
+ *
+ * @brief Header file for unified 10GbE/1GbE Ethernet Controller's MacSec engine.
+ */
+
+#ifndef __AL_HAL_ETH_MACSEC_H__
+#define __AL_HAL_ETH_MACSEC_H__
+
+#include
+#include "al_hal_eth.h"
+
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+
+/*******************************************************************************
+This file defines the API for the MAC Security protocol (MacSec), which provides
+the possibility to maintain a secured L2 or overlay (tunnel) link, at line rate
+bandwidth (10Gbps).
+The advanced Ethernet Controller in the Alpine Platform-on-Chip implements an
+inline MacSec accelerator, which enables each Tx/Rx pkt to be
+encrypted/decrypted and signed/authenticated, as defined in IEEE 802.1AE, as
+well as in novel extensions, enabling inline AES-GCM-GMAC on alternative payload
+including L3 or passenger tunnels.
+The MacSec engine is designed to provide L2 security, with minimum SW changes,
+in order to help the user achieve maximum performance & security, with minimum
+overhead. The engine supports AES-128, AES-192 and AES-256 encryption, and
+can be configured for Authentication-mode only or full data protection that
+provides encryption and authentication.
+
+
+Terminology:
+=================
+1. SA - Security Association. A set of cryptographic parameters needed to secure
+ the traffic (such as key, initialization-vector, packet-number, etc).
+ For improved performance, SAs are stored in an internal scratch-pad
+ memory named SAD (Security Association Data-Structure).
+ Each SAD entry stores a single SA.
+2. SC - Secured Channel. A point-to-point secured link.
+ Each SC comprises a succession of up to 4 SAs, which are indexed by a
+ two-bit integer called AN (Association Number).
+ Each TX MacSec packet is given a SC-index and an AN, from which the
+ relevant SA index is constructed.
+3. Association Number - index of the currently used SA in the Secured Channel (used
+ for hot swap).
+4. Hot Swap - Switching SAs (key, IV, etc) at runtime.
+ Hot-Swap is supported by pre-loading up to 4 SAs per SC, and using the
+ AN to switch between SAs at runtime.
+
+ + TX side: The transmitter is responsible for initiating hot-swap.
+ Once the TX packet-number reaches a pre-configured
+ threshold, an IRQ is raised, signaling the upper layer
+ to switch keys. This is performed by simply incrementing
+ the AN of the next packets to be transmitted.
+ Note: AN is encoded in the MacSec SECTAG header.
+
+ + RX side: The receiver acts as slave in the hot-swap mechanism. It
+ receives the SC & AN indication from the SECTAG header,
+ and thus when the transmitter performs hot-swap
+ (increments AN), the receiver immediately follows.
+5. SAD - Security Association Data-Structure, scratch-pad memory for storing SAs.
+ TX-SAD & RX-SAD are always written in pairs, and maintained by the HW.
+ Maintenance includes incrementing of the PN value (Packet-Number).
+ + TX side: next PN to be coded in the MacSec header
+ + RX side: next expected PN (used for replay attack protection)
+
+ An SAD can store up to 128 SAs (== SAD entries).
+ In common usage (i.e.
sad_access_mode=0, num_an_per_sc=0), SAD will
+ be organized as follows:
+ =================================================
+ | SA_INDEX | SC_INDEX | AN |
+ ====================================
+ PF SAs: | 0 | 0 | 0 |
+ ======= | 1 | 0 | 1 |
+ | 2 | 1 | 0 |
+ ....
+ ....
+ | 30 | 15 | 0 |
+ | 31 | 15 | 1 |
+
+ VF1 SAs: | 32 | 16 | 0 |
+ ======== | 33 | 16 | 1 |
+ ....
+ ....
+ | 62 | 31 | 0 |
+ | 63 | 31 | 1 |
+
+ VF2 SAs: | 64 | 32 | 0 |
+ ======== | 65 | 32 | 1 |
+ ....
+ ....
+ | 94 | 47 | 0 |
+ | 95 | 47 | 1 |
+
+ VF3 SAs: | 96 | 48 | 0 |
+ ======== | 97 | 48 | 1 |
+ ....
+ ....
+ | 126 | 63 | 0 |
+ | 127 | 63 | 1 |
+ =================================================
+
+
+Common Usage:
+=================
+
+1. Initialization stage
+ initialization should be performed by the main driver, and in
+ case of IO Virtualization/SRIOV, the MacSec engine must be initialized
+ by the PF driver (vs. the VF driver)
+ During initialization, user must make sure that no traffic is sent/received.
+ --------------------------
+ + common usage example:
+ --------------------------
+ al_eth_macsec_init (adapter, 0, 0, 0xFF000000, 1);
+
+2. Loading crypto parameters
+ MacSec engine contains a scratch-pad memory, called SAD, for storing
+ cryptographic parameters (SAs) of each Secured Channel.
+ SAD should be updated periodically by the PF, according to an upper-
+ layer protocol such as 802.1AF.
+ --------------------------
+ + common usage example:
+ --------------------------
+ struct al_eth_macsec_sa sa = {
+ .valid = 1,
+ .key_size = AL_ETH_KEY_SIZE_128,
+ .key = ,
+
+ .iv = ,
+ .sci = ,
+ .pn_rx = ,
+ .pn_tx = ,
+ .signature_size = 16,
+
+ .replay_offload_en = 1,
+ .macsec_encapsulation_en = 1,
+ .sectag_tci_end_station = 0,
+ .sectag_tci_encode_sci = 1,
+ .sectag_offset = 12
+ };
+ al_eth_macsec_sad_entry_write (adapter, &sa, );
+ al_eth_macsec_sc_map_entry_write (adapter, sa.sci, sci_mask, );
+ // sc_index should be constructed as sad_index >> 1 or >> 2 (depending on the number of ANs per SC)
+
+
+ * NOTE1: index should be constructed as follows
+ (2 options, dependent on initialization):
+ + option #1: 5-bit secured channel, 2-bit association number
+ + option #2: 6-bit secured_channel, 1-bit association number
+ * NOTE2: in common usage, each VF receives 32 dedicated SA entries.
+ in such a case, the relevant offset (VF ID x 32) should be
+ added to index when calling al_eth_macsec_sad_entry_write.
+
+3. Sending macsec packets is performed independently by the VFs.
+ --------------------------
+ + common usage example:
+ --------------------------
+ struct al_eth_pkt pkt {
+ ...
+ .macsec_secure_channel = ,
+ .macsec_association_number = ,
+ .macsec_flags = 7 // encrypt & sign pkt flags
+ // for further details regarding macsec_flags, see:
+ // AL_ETH_MACSEC_TX_FLAGS_* and AL_ETH_MACSEC_RX_FLAGS_*
+ ...
+ };
+ pkt.flags |= AL_ETH_TX_FLAGS_L2_MACSEC_PKT; // pkt is macsec-pkt
+ pkt.macsec_flags = AL_ETH_MACSEC_TX_FLAGS_SIGN; // pkt should be signed
+
+ pkt.macsec_secure_channel = ;
+ pkt.macsec_association_number = ;
+ al_eth_tx_pkt_prepare(tx_dma_q, &pkt);
+
+4. Receiving macsec packets is performed independently by the VFs.
+ --------------------------
+ + common usage example:
+ --------------------------
+ // ... code for receiving a pkt:...
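+ // (a hedged illustration, not in the original: the receive call below is
+ // hypothetical - actual packet reception goes through the UDMA S2M queue
+ // API, which is outside the scope of this header)
+ // al_eth_pkt_rx(rx_dma_q, &pkt);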
//
+
+ // checking if packet is macsec:
+ al_bool is_macsec_pkt = pkt.macsec_flags & AL_ETH_MACSEC_RX_FLAGS_IS_MACSEC;
+
+ // checking for authentication success:
+ al_bool is_auth_success = is_macsec_pkt &&
+ (pkt.macsec_flags & AL_ETH_MACSEC_RX_FLAGS_AUTH_SUCCESS_MASK);
+
+*******************************************************************************/
+
+
+/** MacSec Constants */
+#define AL_ETH_MACSEC_KEY_NUM_WORDS 8 /**< 32 Bytes */
+#define AL_ETH_MACSEC_IV_NUM_WORDS 4 /**< 16 Bytes */
+#define AL_ETH_MACSEC_SCI_NUM_WORDS 2 /**< 8 Bytes */
+
+/** Packet Rx Macsec flags */
+#define AL_ETH_MACSEC_RX_FLAGS_MSB_SHIFT 31
+#define AL_ETH_MACSEC_RX_FLAGS_LSB_SHIFT 16
+#define AL_ETH_MACSEC_RX_FLAGS_MASK AL_FIELD_MASK(AL_ETH_MACSEC_RX_FLAGS_MSB_SHIFT, \
+ AL_ETH_MACSEC_RX_FLAGS_LSB_SHIFT)
+#define AL_ETH_MACSEC_RX_FLAGS_IS_MACSEC AL_BIT(0) /**< RX pkt is a MacSec pkt */
+#define AL_ETH_MACSEC_RX_FLAGS_SL_MSB_SHIFT 6 /**< Short Length */
+#define AL_ETH_MACSEC_RX_FLAGS_SL_LSB_SHIFT 1
+#define AL_ETH_MACSEC_RX_FLAGS_SL_MASK AL_FIELD_MASK(AL_ETH_MACSEC_RX_FLAGS_SL_MSB_SHIFT, \
+ AL_ETH_MACSEC_RX_FLAGS_SL_LSB_SHIFT)
+#define AL_ETH_MACSEC_RX_FLAGS_AN_MSB_SHIFT 8 /**< Association Number */
+#define AL_ETH_MACSEC_RX_FLAGS_AN_LSB_SHIFT 7
+#define AL_ETH_MACSEC_RX_FLAGS_AN_MASK AL_FIELD_MASK(AL_ETH_MACSEC_RX_FLAGS_AN_MSB_SHIFT, \
+ AL_ETH_MACSEC_RX_FLAGS_AN_LSB_SHIFT)
+#define AL_ETH_MACSEC_RX_FLAGS_IS_ENCRYPTED_SHIFT 9 /**< if asserted, rx L2 pkt was encrypted */
+#define AL_ETH_MACSEC_RX_FLAGS_IS_ENCRYPTED_MASK AL_BIT(AL_ETH_MACSEC_RX_FLAGS_IS_ENCRYPTED_SHIFT)
+#define AL_ETH_MACSEC_RX_FLAGS_IS_SIGNED_SHIFT 10 /**< if asserted, rx L2 pkt was signed */
+#define AL_ETH_MACSEC_RX_FLAGS_IS_SIGNED_MASK AL_BIT(AL_ETH_MACSEC_RX_FLAGS_IS_SIGNED_SHIFT)
+#define AL_ETH_MACSEC_RX_FLAGS_IS_REPLAY_PROTECTED_SHIFT 11 /**< if asserted, replay protection mechanism was applied to rx pkt */
+#define AL_ETH_MACSEC_RX_FLAGS_IS_REPLAY_PROTECTED_MASK AL_BIT(AL_ETH_MACSEC_RX_FLAGS_IS_REPLAY_PROTECTED_SHIFT)
+#define AL_ETH_MACSEC_RX_FLAGS_AUTH_SUCCESS_SHIFT 12 /**< if asserted, rx pkt's signature was verified to be correct */
+#define AL_ETH_MACSEC_RX_FLAGS_AUTH_SUCCESS_MASK AL_BIT(AL_ETH_MACSEC_RX_FLAGS_AUTH_SUCCESS_SHIFT)
+#define AL_ETH_MACSEC_RX_FLAGS_REPLAY_SUCCESS_SHIFT 13 /**< if asserted, pkt is verified to not be a replay-attack pkt */
+#define AL_ETH_MACSEC_RX_FLAGS_REPLAY_SUCCESS_MASK AL_BIT(AL_ETH_MACSEC_RX_FLAGS_REPLAY_SUCCESS_SHIFT)
+
+
+
+/** Encryption key size.
+ * AES key size (traditionally referring to bit-length)
+ */
+enum al_eth_macsec_key_size {AL_ETH_KEY_SIZE_128, AL_ETH_KEY_SIZE_192, AL_ETH_KEY_SIZE_256};
+
+
+/** MacSec statistics counters.
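+ * (added note: all counters below are 32-bit HW counters, fetched in one
+ * call via al_eth_macsec_read_stats_cntr())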
+ * Statistics counters interface + */ +struct al_eth_macsec_stats_cntr { + uint32_t tx_pkts; /* number of tx pkts processed by the MacSec engine */ + uint32_t tx_secured_pkts; /* number of tx pkts processed by the MacSec engine + which are either encrypted or authenticated (or both) */ + uint32_t tx_unsecured_pkts; /* number of tx pkts processed by the MacSec engine + which are neither encrypted nor authenticated */ + uint32_t rx_pkts; /* number of rx pkts processed by the MacSec engine */ + uint32_t rx_secured_pkts; /* number of rx pkts processed by the MacSec engine + which are either encrypted or authenticated (or both) */ + uint32_t rx_unsecured_pkts; /* number of rx pkts processed by the MacSec engine + which are neither encrypted nor authenticated */ + uint32_t rx_replay_fail_pkts; /* number of rx pkts processed by the MacSec engine + which failed replay check */ + uint32_t rx_auth_fail_pkts; /* number of rx pkts processed by the MacSec engine + which failed authentication */ + }; + + +/** MacSec SAD (Security Association Data-Structure). + * MacSec engine has an internal scratch-pad memory, used to cache security + * parameters for active channels. A SAD entry is called SA, and is described + * by the structure below. + * NOTE: for all multiple-words fields, word #0 is LSW. + */ +struct al_eth_macsec_sa { + al_bool valid; /* entry is valid */ + + /** cryptographic parameters */ + enum al_eth_macsec_key_size key_size; + uint32_t key[AL_ETH_MACSEC_KEY_NUM_WORDS]; /* Encryption Key. For 128/192-bit keys, pad LSWs with zeros */ + + uint32_t iv[AL_ETH_MACSEC_IV_NUM_WORDS]; /* == AES_k(0). Meaning, this field should be written with + the result of encrypting 16 zero-bytes, with the SA's key. + This is not abstracted from the user, in order not to insert + encryption algorithm into a low-level HAL code + */ + + uint32_t sci[AL_ETH_MACSEC_SCI_NUM_WORDS]; /* Secure Channel Identifier */ + uint32_t pn_tx; /* Packet Number for TX packets */ + uint32_t pn_rx; /* Packet Number for RX packets (maintained for replay attack protection) */ + uint8_t signature_size:5; /* legal values are 1-16 */ + + /** macsec protocol parameters */ + al_bool replay_offload_en; /* when asserted, hw will perform replay protection for incoming pkts */ + al_bool macsec_encapsulation_en; /* when asserted, macsec will seamlessly add (tx) & remove (rx) + macsec headers to/from packet. + */ + al_bool sectag_tci_end_station; /* current node is an end-station. See 802.1AE for further details. */ + al_bool sectag_tci_encode_sci; /* when asserted, sci-field will be encoded in all tx macsec packets + see 802.1AE for further details + */ + uint16_t sectag_offset; /* location of MacSec header (SECTAG). for regular (non-tunneled) traffic, + this field should be written with 0xC (SA len + DA len). + otherwise, GRE header length should be taken into account. + */ +}; + + +/* forward declaration */ +struct al_hal_eth_adapter; + + +/** + * @brief This function initializes and enables the inline MacSec engine. + * - Initialize TX and RX inline Crypto engines + * - Enable TX and RX inline Crypto engines + * + * @param adapter Pointer to eth's private data-structure. The + * adapter is used in order to extract the base + * macsec configuration address. + * @param sad_access_mode if asserted, all SAD entries will be available + * to all VFs. Otherwise, each VF will get + * AL_ETH_MACSEC_SAD_DEPTH/4 dedicated entries + * for caching SAs. + * @param num_an_per_sc Number of ANs (Association Numbers) per SC + * (Secure Channel). 
ANs are aimed to allow + * hot-swap of encryption keys. + * ** 802.1AE allows only 2 or 4 ANs per SC. + * @param pn_threshold when Packet-Number (which is maintained by HW) + * passes this threshold, an IRQ will be raised + * signaling the driver that it is time to load a + * new SA to SAD + * @param enable if asserted, enable the inline MacSec engine + * (which is disabled at reset) + * + */ +void al_eth_macsec_init ( struct al_hal_eth_adapter *adapter, + al_bool sad_access_mode, + uint8_t num_an_per_sc, + uint32_t pn_threshold, + al_bool enable + ); + + +/** + * @brief this function writes a Security Association (SA) to the + * Security Association Data-Structure (SAD). The SAD can store up + * to 128 SAs. + * This function writes both TX-SAD & RX-SAD, since SA should always + * come in pairs. the only value that is allowed to differ between + * TX-SAD & RX-SAD is PN (packet-number). + * As explained in the terminology section, user can store up to 4 + * SAs per SC, to support hot swap of crypto parameters. + * When calling this function, user should consider the + * configuration of the MacSec engine: + * + if sad_access_mode was set to 1'b0, user should arrange SAD as + * 4 distinct index-spaces, one for each VF. + * [ PF : sa0 , sa1 , ..,. sa31 ] + * [ VF1: sa32, sa33, ... sa63 ] + * [ VF2: sa64, sa65, ... sa95 ] + * [ VF3: sa96, sa97, ... sa127 ] + * + if num_an_per_sc was set to 1'b1, each SC should have 4 SAs. + * otherwise, each SC should have 2 ANs. + * + * Note: This function is allowed to be called by the PF driver only. + * + * @param adapter Pointer to eth's private data-structure. The + * adapter is used in order to extract the base + * macsec configuration address. + * @param sa SA to be written to SAD + * @param index SAD index to be written. Legal values are 0-127. + */ +void al_eth_macsec_sad_entry_write ( struct al_hal_eth_adapter *adapter, + struct al_eth_macsec_sa *sa, + uint8_t index); + + +/** + * @brief this function reads a Security Association (SA) from the + * Security Association Data-Structure (SAD). + * + * Note: This function is allowed to be called by the PF driver only. + * + * @param adapter Pointer to eth's private data-structure. The + * adapter is used in order to extract the base + * macsec configuration address. + * @param sa read SA from SAD + * @param index SAD index to be read. Legal values are 0-127. + */ +void al_eth_macsec_sad_entry_read ( struct al_hal_eth_adapter *adapter, + struct al_eth_macsec_sa *sa, + uint8_t index); + + +/** + * @brief this function checks if a SAD entry (SA) is valid. + * + * Note: This function is allowed to be called by the PF driver only. + * + * @param adapter Pointer to eth's private data-structure. The + * adapter is used in order to extract the base + * macsec configuration address. + * @param index SAD index to be checked. Legal values are 0-127. + */ +al_bool al_eth_macsec_sad_entry_is_valid (struct al_hal_eth_adapter *adapter, uint8_t index); + + +/** + * @brief this function invalidates a SAD entry (SA). + * + * Note: This function is allowed to be called by the PF driver only. + * + * @param adapter Pointer to eth's private data-structure. The + * adapter is used in order to extract the base + * macsec configuration address. + * @param index SAD index to be invalidated. + * Legal values are 0-127. + */ +void al_eth_macsec_sad_entry_invalidate (struct al_hal_eth_adapter *adapter, uint8_t index); + + +/** + * @brief this function writes an entry to the SC map table. 
+ * The SC map is responsible for mapping between SCI and SC indexes
+ * and is used to extract SA index from the (rx) packet's macsec header
+ * (namely, its SCI & AN fields).
+ * This function should be called immediately after loading a new
+ * SA to the RX-SAD.
+ * @param adapter Pointer to eth's private data-structure. The
+ * adapter is used in order to extract the base
+ * macsec configuration address.
+ * @param sci pointer to an array of two uint32_t elements,
+ * which holds the 64-bit SCI (Secured Channel Identifier)
+ * that will be recorded in the SC map
+ * @param sci_mask pointer to an array of two uint32_t elements
+ * which holds the 64-bit SCI_MASK that will be
+ * recorded in the SC map.
+ * Typically, this field will be written with all-ones
+ * @param sc_index Secured Channel Index - should be the same as the relevant
+ * SA address, but without the 1 or 2 LSBs which are used for
+ * AN (Association Number)
+ */
+void al_eth_macsec_sc_map_entry_write (struct al_hal_eth_adapter *adapter,
+ uint32_t *sci,
+ uint32_t *sci_mask,
+ uint8_t sc_index);
+
+
+/**
+ * @brief this function returns the index of the last TX SA which has been exhausted.
+ * It should be called upon receiving a tx_sa_exhausted IRQ.
+ * Upon receiving tx_sa_exhausted IRQ, upper layer should initiate
+ * key-negotiation, and load a new SA to replace the exhausted one.
+ *
+ * @param adapter Pointer to eth's private data-structure. The
+ * adapter is used in order to extract the base
+ * macsec configuration address.
+ *
+ * @returns index of the exhausted SA.
+ */
+unsigned int al_eth_macsec_read_exhausted_tx_sa_index (struct al_hal_eth_adapter *adapter);
+
+
+/**
+ * @brief this function returns the index of the last RX SA which has been exhausted.
+ * It should be called upon receiving a rx_sa_exhausted IRQ.
+ * Upon receiving rx_sa_exhausted IRQ, upper layer should initiate
+ * key-negotiation, and load a new SA to replace the exhausted one.
+ *
+ * @param adapter Pointer to eth's private data-structure. The
+ * adapter is used in order to extract the base
+ * macsec configuration address.
+ *
+ * @returns index of the exhausted SA.
+ */
+unsigned int al_eth_macsec_read_exhausted_rx_sa_index (struct al_hal_eth_adapter *adapter);
+
+
+/**
+ * @brief this function reads the MacSec engine's statistics counters
+ *
+ * @param adapter Pointer to eth's private data-structure. The
+ * adapter is used in order to extract the base
+ * macsec configuration address.
+ * @param stats_cntrs a struct filled by the function with statistics
+ * counters values
+ */
+void al_eth_macsec_read_stats_cntr (struct al_hal_eth_adapter *adapter,
+ struct al_eth_macsec_stats_cntr *stats_cntrs);
+
+
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+#endif /* __AL_HAL_ETH_MACSEC_H__ */
+/** @} end of Ethernet Macsec group */
diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_macsec_regs.h b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_macsec_regs.h
new file mode 100644
index 00000000000000..38d0cbcced2094
--- /dev/null
+++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_macsec_regs.h
@@ -0,0 +1,348 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_eth_macsec_regs.h
+ *
+ * @brief ... registers
+ *
+ */
+
+#ifndef __AL_HAL_ETH_MACSEC_REGS_H
+#define __AL_HAL_ETH_MACSEC_REGS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+
+
+struct al_macsec_configuration {
+ /* [0x0] TX MACsec configuration */
+ uint32_t conf_macsec_tx;
+ /* [0x4] TX MACsec PN threshold */
+ uint32_t conf_macsec_tx_pn_thr;
+ /* [0x8] RX MACsec configuration */
+ uint32_t conf_macsec_rx;
+ /* [0xc] RX MACsec PN threshold */
+ uint32_t conf_macsec_rx_pn_thr;
+ /* [0x10] Ethertype field */
+ uint32_t conf_macsec_ethertype;
+ /* [0x14] sectag header location in parser bus */
+ uint32_t conf_macsec_rx_parse_info_sectag_header;
+ /* [0x18] source mac location in parser bus */
+ uint32_t conf_macsec_rx_parse_info_source_mac;
+ /* [0x1c] dest mac location in parser bus */
+ uint32_t conf_macsec_rx_parse_info_dest_mac;
+ /* [0x20] sectag offset location in parser bus */
+ uint32_t conf_macsec_rx_parse_info_sectag_offset;
+ /* [0x24] auth start location */
+ uint32_t conf_macsec_auth_start;
+ uint32_t rsrvd[10];
+};
+struct al_macsec_sectag_encoding {
+ /* [0x0] Table's address3-bit address is encoded as {ICV_siz ... */
+ uint32_t sectag_encoding_control;
+ /* [0x4] Table's data:
+{TCI_E, TCI_C} */
+ uint32_t sectag_encoding_line;
+};
+struct al_macsec_sectag_decoding {
+ /* [0x0] Table's address6-bit address is decoded as SECTAG_T ... */
+ uint32_t sectag_decoding_control;
+ /* [0x4] Table's data:{sci_sel[1:0], bypass, drop, icv_size[ ... */
+ uint32_t sectag_decoding_line;
+};
+struct al_macsec_cache_access_tx_sad_control {
+ /* [0x0] TX_SAD's address & control bits */
+ uint32_t tx_sad_control;
+};
+struct al_macsec_cache_access_tx_sad_data {
+ /* [0x0] Tx SAD's data */
+ uint32_t tx_sad_data;
+};
+struct al_macsec_cache_access_rx_sad_control {
+ /* [0x0] RX_SAD's address & control bits */
+ uint32_t rx_sad_control;
+};
+struct al_macsec_cache_access_rx_sad_data {
+ /* [0x0] Rx SAD's data */
+ uint32_t rx_sad_data;
+};
+struct al_macsec_cam_access_rx_sci_build_control {
+ /* [0x0] CAM's address */
+ uint32_t sci_build_control;
+};
+struct al_macsec_cam_access_rx_sci_build_data {
+ /* [0x0] */
+ uint32_t sci_build_data;
+};
+struct al_macsec_cam_access_rx_sa_index_build_control {
+ /* [0x0] CAM's address */
+ uint32_t sa_index_build_control;
+};
+struct al_macsec_cam_access_rx_sa_index_build_data {
+ /* [0x0] CAM's data CAM line is written only when perform_co ... */
+ uint32_t sa_index_build_data;
+};
+struct al_macsec_status {
+ /* [0x0] Tx MACsec status */
+ uint32_t status_macsec_tx;
+ /* [0x4] Rx MACsec status */
+ uint32_t status_macsec_rx;
+ uint32_t rsrvd[0];
+};
+struct al_macsec_statistics {
+ /* [0x0] Number of Tx packets */
+ uint32_t macsec_tx_pkts;
+ /* [0x4] Number of controlled (secured) Tx packets */
+ uint32_t macsec_tx_controlled_pkts;
+ /* [0x8] Number of uncontrolled (unsecured) Tx packets */
+ uint32_t macsec_tx_uncontrolled_pkts;
+ /* [0xc] Number of dropped Tx packets */
+ uint32_t macsec_tx_dropped_pkts;
+ /* [0x10] Number of Rx packets */
+ uint32_t macsec_rx_pkts;
+ /* [0x14] Number of controlled (secured) Rx packets */
+ uint32_t macsec_rx_controlled_pkts;
+ /* [0x18] Number of uncontrolled (unsecured) Rx packets */
+ uint32_t macsec_rx_uncontrolled_pkts;
+ /* [0x1c] Number of dropped Rx packets */
+ uint32_t macsec_rx_dropped_pkts;
+ /* [0x20] Number of detected Rx replay packets */
+ uint32_t macsec_rx_replay_pkts;
+ /* [0x24] Number of Rx packets that failed authentication */
+ uint32_t macsec_rx_auth_fail_pkts;
+};
+
+struct al_macsec_regs {
+ struct al_macsec_configuration configuration; /* [0x0] */
+ struct al_macsec_sectag_encoding sectag_encoding; /* [0x50] */
+ uint32_t rsrvd_0[2];
+ struct al_macsec_sectag_decoding sectag_decoding; /* [0x60] */
+ struct al_macsec_cache_access_tx_sad_control cache_access_tx_sad_control; /* [0x68] */
+ struct al_macsec_cache_access_tx_sad_data cache_access_tx_sad_data[16]; /* [0x6c] */
+ uint32_t rsrvd_1;
+ struct al_macsec_cache_access_rx_sad_control cache_access_rx_sad_control; /* [0xb0] */
+ struct al_macsec_cache_access_rx_sad_data cache_access_rx_sad_data[16]; /* [0xb4] */
+ struct al_macsec_cam_access_rx_sci_build_control cam_access_rx_sci_build_control; /* [0xf4] */
+ struct al_macsec_cam_access_rx_sci_build_data cam_access_rx_sci_build_data[8]; /* [0xf8] */
+ struct al_macsec_cam_access_rx_sa_index_build_control cam_access_rx_sa_index_build_control; /* [0x118] */
+ struct al_macsec_cam_access_rx_sa_index_build_data cam_access_rx_sa_index_build_data[5]; /* [0x11c] */
+ struct al_macsec_status status; /* [0x130] */
+ uint32_t rsrvd_2[2];
+ struct al_macsec_statistics statistics; /* [0x140] */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** conf_macsec_tx register ****/
+/* 1 - MACsec mechanism enabled for TX packets0 - Bypass Tx MACs ... */
+#define MACSEC_CONFIGURATION_CONF_MACSEC_TX_ENABLE_ENGINE (1 << 0)
+/* Number of Security Associations per Secure Channel1'b0 - 2 SA ...
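+ * (added clarifying note: the recovered meaning is 1'b0 - 2 SAs per SC,
+ * 1'b1 - 4 SAs per SC; this is the bit al_eth_macsec_init() sets when its
+ * num_an_per_sc argument is 4)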
*/ +#define MACSEC_CONFIGURATION_CONF_MACSEC_TX_NUM_SA_PER_SC (1 << 1) +/* If set, VF_ind will not be used in addressing the TX_SAD. */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_TX_DIRECT_ACCESS (1 << 2) +/* Enable dropping of Tx packets */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_TX_DROP_EN (1 << 3) +/* Drop all Tx packets arriving at the MACsec accelerator */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_TX_DROP_ALL (1 << 4) +/* Enable "smart" clock gating in AES */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_TX_CLK_GATE_EN (1 << 5) +/* 4 - AES_MODE_ECB5 - AES_MODE_CBC6 - AES_MODE_CTR;7 - AES_MODE ... */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_TX_CRYPTO_ENGINE_AES_MODE_MASK 0x000003C0 +#define MACSEC_CONFIGURATION_CONF_MACSEC_TX_CRYPTO_ENGINE_AES_MODE_SHIFT 6 +/* 0 - 16 bit +1 - 32 bit +2 - 64 bit +3 - 128 bit */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_TX_CRYPTO_ENGINE_AES_CNTR_STEP_MASK 0x00000C00 +#define MACSEC_CONFIGURATION_CONF_MACSEC_TX_CRYPTO_ENGINE_AES_CNTR_STEP_SHIFT 10 +/* Clear AP_ALIGNER */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_TX_DEBUG_CLR_ASSEMBLER (1 << 30) +/* Clear AP_SPLITTER */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_TX_DEBUG_CLR_SPLITTER (1 << 31) + +/**** conf_macsec_rx register ****/ +/* 1 - MACsec mechanism enabled for Rx packets0 - Bypass Rx MACs ... */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_ENABLE_ENGINE (1 << 0) +/* Number of Security Associations per Secure Channel1'b0 - 2 Sa ... */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_NUM_SA_PER_SC (1 << 1) +/* If set, VF_ind will not be used in addressing the RX_SAD. */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_DIRECT_ACCESS (1 << 2) +/* Enable dropping of Rx packets. */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_DROP_EN (1 << 3) +/* Drop all Rx packets arriving at the MACsec accelerator. */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_DROP_ALL (1 << 4) +/* Enable "smart" clock gating in AES */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_CLK_GATE_EN (1 << 5) +/* 4 - AES_MODE_ECB5 - AES_MODE_CBC6 - AES_MODE_CTR;7 - AES_MODE ... */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_CRYPTO_ENGINE_AES_MODE_MASK 0x000003C0 +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_CRYPTO_ENGINE_AES_MODE_SHIFT 6 +/* 0 - 16 bit +1 - 32 bit +2 - 64 bit +3 - 128 bit */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_CRYPTO_ENGINE_AES_CNTR_STEP_MASK 0x00000C00 +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_CRYPTO_ENGINE_AES_CNTR_STEP_SHIFT 10 +/* Method of SCI creation:0 - according to Sectag Parsing Table ... */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_SCI_SEL_MASK 0x00003000 +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_SCI_SEL_SHIFT 12 +/* if set, take sectag start offset from parser results, otherwi ... */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_TUNNEL_ENABLE (1 << 14) +/* Clear AP_ALIGNER */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_DEBUG_CLR_ASSEMBLER (1 << 30) +/* Clear AP_SPLITTER */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_RX_DEBUG_CLR_SPLITTER (1 << 31) + +/**** conf_macsec_ethertype register ****/ +/* MACsec Ethertype constant */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_ETHERTYPE_DATA_MASK 0x0000FFFF +#define MACSEC_CONFIGURATION_CONF_MACSEC_ETHERTYPE_DATA_SHIFT 0 + +/**** conf_macsec_auth_start register ****/ +/* 2'b00 - auth starts at sop2'b01 - auth starts according to au ... 
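+ * (added note: per the field comments below, the from_start offset applies
+ * when this control field equals 2'b01; the rest of the generated comment
+ * is truncated in the original)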
*/ +#define MACSEC_CONFIGURATION_CONF_MACSEC_AUTH_START_CTRL_MASK 0x00000003 +#define MACSEC_CONFIGURATION_CONF_MACSEC_AUTH_START_CTRL_SHIFT 0 +/* auth start offset (relevant if ctrl == 2'b01) */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_AUTH_START_FROM_START_MASK 0x0003FFFC +#define MACSEC_CONFIGURATION_CONF_MACSEC_AUTH_START_FROM_START_SHIFT 2 +/* auth start location relative to sectag header location (relev ... */ +#define MACSEC_CONFIGURATION_CONF_MACSEC_AUTH_START_FROM_SECTAG_MASK 0xFFFC0000 +#define MACSEC_CONFIGURATION_CONF_MACSEC_AUTH_START_FROM_SECTAG_SHIFT 18 + +/**** sectag_encoding_control register ****/ +/* Address */ +#define MACSEC_SECTAG_ENCODING_SECTAG_ENCODING_CONTROL_ADDR_MASK 0x00000007 +#define MACSEC_SECTAG_ENCODING_SECTAG_ENCODING_CONTROL_ADDR_SHIFT 0 +/* copy data registers content to relevant line in table */ +#define MACSEC_SECTAG_ENCODING_SECTAG_ENCODING_CONTROL_PERFORM_WRITE (1 << 8) +/* copy relevant line in table to data registers */ +#define MACSEC_SECTAG_ENCODING_SECTAG_ENCODING_CONTROL_PERFORM_READ (1 << 9) + +/**** sectag_encoding_line register ****/ +/* Data */ +#define MACSEC_SECTAG_ENCODING_SECTAG_ENCODING_LINE_DATA_MASK 0x00000003 +#define MACSEC_SECTAG_ENCODING_SECTAG_ENCODING_LINE_DATA_SHIFT 0 + +/**** sectag_decoding_control register ****/ +/* Address */ +#define MACSEC_SECTAG_DECODING_SECTAG_DECODING_CONTROL_ADDR_MASK 0x0000003F +#define MACSEC_SECTAG_DECODING_SECTAG_DECODING_CONTROL_ADDR_SHIFT 0 +/* copy data registers content to relevant line in table */ +#define MACSEC_SECTAG_DECODING_SECTAG_DECODING_CONTROL_PERFORM_WRITE (1 << 8) +/* copy relevant line in table to data registers */ +#define MACSEC_SECTAG_DECODING_SECTAG_DECODING_CONTROL_PERFORM_READ (1 << 9) + +/**** sectag_decoding_line register ****/ +/* Data */ +#define MACSEC_SECTAG_DECODING_SECTAG_DECODING_LINE_DATA_MASK 0x000007FF +#define MACSEC_SECTAG_DECODING_SECTAG_DECODING_LINE_DATA_SHIFT 0 + +/**** tx_sad_control register ****/ +/* Address */ +#define MACSEC_CACHE_ACCESS_TX_SAD_CONTROL_TX_SAD_CONTROL_ADDR_MASK 0x0000007F +#define MACSEC_CACHE_ACCESS_TX_SAD_CONTROL_TX_SAD_CONTROL_ADDR_SHIFT 0 +/* copy data registers content to relevant line in table */ +#define MACSEC_CACHE_ACCESS_TX_SAD_CONTROL_TX_SAD_CONTROL_PERFORM_WRITE (1 << 8) +/* copy relevant line in table to data registers */ +#define MACSEC_CACHE_ACCESS_TX_SAD_CONTROL_TX_SAD_CONTROL_PERFORM_READ (1 << 9) + +/**** rx_sad_control register ****/ +/* Address */ +#define MACSEC_CACHE_ACCESS_RX_SAD_CONTROL_RX_SAD_CONTROL_ADDR_MASK 0x0000007F +#define MACSEC_CACHE_ACCESS_RX_SAD_CONTROL_RX_SAD_CONTROL_ADDR_SHIFT 0 +/* copy data registers content to relevant line in table */ +#define MACSEC_CACHE_ACCESS_RX_SAD_CONTROL_RX_SAD_CONTROL_PERFORM_WRITE (1 << 8) +/* copy relevant line in table to data registers */ +#define MACSEC_CACHE_ACCESS_RX_SAD_CONTROL_RX_SAD_CONTROL_PERFORM_READ (1 << 9) + +/**** sci_build_control register ****/ +/* Address */ +#define MACSEC_CAM_ACCESS_RX_SCI_BUILD_CONTROL_SCI_BUILD_CONTROL_ADDR_MASK 0x0000000F +#define MACSEC_CAM_ACCESS_RX_SCI_BUILD_CONTROL_SCI_BUILD_CONTROL_ADDR_SHIFT 0 +/* copy data registers content to relevant line in table */ +#define MACSEC_CAM_ACCESS_RX_SCI_BUILD_CONTROL_SCI_BUILD_CONTROL_PERFORM_WRITE (1 << 8) +/* copy relevant line in table to data registers */ +#define MACSEC_CAM_ACCESS_RX_SCI_BUILD_CONTROL_SCI_BUILD_CONTROL_PERFORM_READ (1 << 9) + +/**** sa_index_build_control register ****/ +/* Address */ +#define 
MACSEC_CAM_ACCESS_RX_SA_INDEX_BUILD_CONTROL_SA_INDEX_BUILD_CONTROL_ADDR_MASK 0x0000003F +#define MACSEC_CAM_ACCESS_RX_SA_INDEX_BUILD_CONTROL_SA_INDEX_BUILD_CONTROL_ADDR_SHIFT 0 +/* copy data registers to relevant line in table */ +#define MACSEC_CAM_ACCESS_RX_SA_INDEX_BUILD_CONTROL_SA_INDEX_BUILD_CONTROL_PERFORM_WRITE (1 << 8) +/* copy relevant line in table to data registers */ +#define MACSEC_CAM_ACCESS_RX_SA_INDEX_BUILD_CONTROL_SA_INDEX_BUILD_CONTROL_PERFORM_READ (1 << 9) + +/**** status_macsec_tx register ****/ +/* SA index of Tx packet that reached PN threshold */ +#define MACSEC_STATUS_STATUS_MACSEC_TX_PN_THR_SA_INDEX_MASK 0x000000FF +#define MACSEC_STATUS_STATUS_MACSEC_TX_PN_THR_SA_INDEX_SHIFT 0 +/* macsec_tx_tmo FSM state */ +#define MACSEC_STATUS_STATUS_MACSEC_TX_TMO_STATE_MASK 0x00000F00 +#define MACSEC_STATUS_STATUS_MACSEC_TX_TMO_STATE_SHIFT 8 +/* macsec_tx_tmi FSM state */ +#define MACSEC_STATUS_STATUS_MACSEC_TX_TMI_STATE_MASK 0x0000F000 +#define MACSEC_STATUS_STATUS_MACSEC_TX_TMI_STATE_SHIFT 12 + +/**** status_macsec_rx register ****/ +/* SA index of Rx packet that reached PN threshold */ +#define MACSEC_STATUS_STATUS_MACSEC_RX_PN_THR_SA_INDEX_MASK 0x000000FF +#define MACSEC_STATUS_STATUS_MACSEC_RX_PN_THR_SA_INDEX_SHIFT 0 +/* macsec_rx_rmo FSM state */ +#define MACSEC_STATUS_STATUS_MACSEC_RX_RMO_STATE_MASK 0x00000F00 +#define MACSEC_STATUS_STATUS_MACSEC_RX_RMO_STATE_SHIFT 8 +/* macsec_rx_rmi FSM state */ +#define MACSEC_STATUS_STATUS_MACSEC_RX_RMI_STATE_MASK 0x0000F000 +#define MACSEC_STATUS_STATUS_MACSEC_RX_RMI_STATE_SHIFT 12 + +#ifdef __cplusplus +} +#endif + +#endif /* __AL_HAL_ETH_MACSEC_REGS_H */ + +/** @} end of ... group */ + + diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_main.c b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_main.c new file mode 100644 index 00000000000000..0e8b441aeae5a7 --- /dev/null +++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_hal_eth_main.c @@ -0,0 +1,5131 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +/** + * @{ + * @file al_hal_eth_main.c + * + * @brief XG Ethernet unit HAL driver for main functions (initialization, data path) + * + */ + +#include +#include +#include +#include "al_hal_eth.h" +#include "al_hal_eth_ec_regs.h" +#include "al_hal_eth_mac_regs.h" +#ifdef AL_ETH_SUPPORT_DDP +#include "al_hal_eth_ddp_internal.h" +#endif + +/* Number of clocks to stop the Tx MAC interface after getting out of EEE mode */ +#define AL_ETH_EEE_STOP_CNT 100000 + +#define AL_ETH_TX_PKT_UDMA_FLAGS (AL_ETH_TX_FLAGS_NO_SNOOP | \ + AL_ETH_TX_FLAGS_INT) + +#define AL_ETH_TX_PKT_META_FLAGS (AL_ETH_TX_FLAGS_IPV4_L3_CSUM | \ + AL_ETH_TX_FLAGS_L4_CSUM | \ + AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM | \ + AL_ETH_TX_FLAGS_L2_MACSEC_PKT | \ + AL_ETH_TX_FLAGS_L2_DIS_FCS |\ + AL_ETH_TX_FLAGS_TSO |\ + AL_ETH_TX_FLAGS_TS) + +#define AL_ETH_TX_SRC_VLAN_CNT_MASK 3 +#define AL_ETH_TX_SRC_VLAN_CNT_SHIFT 5 +#define AL_ETH_TX_L4_PROTO_IDX_MASK 0x1F +#define AL_ETH_TX_L4_PROTO_IDX_SHIFT 8 +#define AL_ETH_TX_TUNNEL_MODE_SHIFT 18 +#define AL_ETH_TX_OUTER_L3_PROTO_SHIFT 20 +#define AL_ETH_TX_VLAN_MOD_ADD_SHIFT 22 +#define AL_ETH_TX_VLAN_MOD_DEL_SHIFT 24 +#define AL_ETH_TX_VLAN_MOD_E_SEL_SHIFT 26 +#define AL_ETH_TX_VLAN_MOD_VID_SEL_SHIFT 28 +#define AL_ETH_TX_VLAN_MOD_PBIT_SEL_SHIFT 30 + +/* tx Meta Descriptor defines */ +#define AL_ETH_TX_META_STORE (1 << 21) +#define AL_ETH_TX_META_L3_LEN_MASK 0xff +#define AL_ETH_TX_META_L3_OFF_MASK 0xff +#define AL_ETH_TX_META_L3_OFF_SHIFT 8 +#define AL_ETH_TX_META_MSS_LSB_VAL_SHIFT 22 +#define AL_ETH_TX_META_MSS_MSB_TS_VAL_SHIFT 16 +#define AL_ETH_TX_META_OUTER_L3_LEN_MASK 0x1f +#define AL_ETH_TX_META_OUTER_L3_LEN_SHIFT 24 +#define AL_ETH_TX_META_OUTER_L3_OFF_HIGH_MASK 0x18 +#define AL_ETH_TX_META_OUTER_L3_OFF_HIGH_SHIFT 10 +#define AL_ETH_TX_META_OUTER_L3_OFF_LOW_MASK 0x07 +#define AL_ETH_TX_META_OUTER_L3_OFF_LOW_SHIFT 29 + +/* tx Meta Descriptor defines - MacSec */ +#define AL_ETH_TX_MACSEC_SIGN_SHIFT 0 /* Sign TX pkt */ +#define AL_ETH_TX_MACSEC_ENCRYPT_SHIFT 1 /* Encrypt TX pkt */ +#define AL_ETH_TX_MACSEC_AN_LSB_SHIFT 2 /* Association Number */ +#define AL_ETH_TX_MACSEC_AN_MSB_SHIFT 3 +#define AL_ETH_TX_MACSEC_SC_LSB_SHIFT 4 /* Secured Channel */ +#define AL_ETH_TX_MACSEC_SC_MSB_SHIFT 9 +#define AL_ETH_TX_MACSEC_SECURED_PYLD_LEN_LSB_SHIFT 10 /* Secure Payload Length (0x3FFF for non-SL packets) */ +#define AL_ETH_TX_MACSEC_SECURED_PYLD_LEN_MSB_SHIFT 23 + +/* Rx Descriptor defines */ +#define AL_ETH_RX_L3_PROTO_IDX_MASK 0x1F +#define AL_ETH_RX_SRC_VLAN_CNT_MASK 3 +#define AL_ETH_RX_SRC_VLAN_CNT_SHIFT 5 +#define AL_ETH_RX_L4_PROTO_IDX_MASK 0x1F +#define AL_ETH_RX_L4_PROTO_IDX_SHIFT 8 + +#define AL_ETH_RX_L3_OFFSET_SHIFT 9 +#define AL_ETH_RX_L3_OFFSET_MASK (0x7f << AL_ETH_RX_L3_OFFSET_SHIFT) +#define AL_ETH_RX_HASH_SHIFT 16 +#define AL_ETH_RX_HASH_MASK (0xffff << AL_ETH_RX_HASH_SHIFT) +#define AL_S2M_DESC_LEN2_SHIFT 16 +#define AL_S2M_DESC_LEN2_MASK 
(0xff << AL_S2M_DESC_LEN2_SHIFT) + +#define ETH_MAC_GEN_LED_CFG_BLINK_TIMER_VAL 5 +#define ETH_MAC_GEN_LED_CFG_ACT_TIMER_VAL 7 + +/* Tx VID Table*/ +#define AL_ETH_TX_VLAN_TABLE_UDMA_MASK 0xF +#define AL_ETH_TX_VLAN_TABLE_FWD_TO_MAC (1 << 4) + +/* tx gpd defines */ +#define AL_ETH_TX_GPD_L3_PROTO_MASK 0x1f +#define AL_ETH_TX_GPD_L3_PROTO_SHIFT 0 +#define AL_ETH_TX_GPD_L4_PROTO_MASK 0x1f +#define AL_ETH_TX_GPD_L4_PROTO_SHIFT 5 +#define AL_ETH_TX_GPD_TUNNEL_CTRL_MASK 0x7 +#define AL_ETH_TX_GPD_TUNNEL_CTRL_SHIFT 10 +#define AL_ETH_TX_GPD_SRC_VLAN_CNT_MASK 0x3 +#define AL_ETH_TX_GPD_SRC_VLAN_CNT_SHIFT 13 +#define AL_ETH_TX_GPD_CAM_DATA_2_SHIFT 32 +#define AL_ETH_TX_GPD_CAM_MASK_2_SHIFT 32 +#define AL_ETH_TX_GPD_CAM_CTRL_VALID_SHIFT 31 + +/* tx gcp defines */ +#define AL_ETH_TX_GCP_POLY_SEL_MASK 0x1 +#define AL_ETH_TX_GCP_POLY_SEL_SHIFT 0 +#define AL_ETH_TX_GCP_CRC32_BIT_COMP_MASK 0x1 +#define AL_ETH_TX_GCP_CRC32_BIT_COMP_SHIFT 1 +#define AL_ETH_TX_GCP_CRC32_BIT_SWAP_MASK 0x1 +#define AL_ETH_TX_GCP_CRC32_BIT_SWAP_SHIFT 2 +#define AL_ETH_TX_GCP_CRC32_BYTE_SWAP_MASK 0x1 +#define AL_ETH_TX_GCP_CRC32_BYTE_SWAP_SHIFT 3 +#define AL_ETH_TX_GCP_DATA_BIT_SWAP_MASK 0x1 +#define AL_ETH_TX_GCP_DATA_BIT_SWAP_SHIFT 4 +#define AL_ETH_TX_GCP_DATA_BYTE_SWAP_MASK 0x1 +#define AL_ETH_TX_GCP_DATA_BYTE_SWAP_SHIFT 5 +#define AL_ETH_TX_GCP_TRAIL_SIZE_MASK 0xF +#define AL_ETH_TX_GCP_TRAIL_SIZE_SHIFT 6 +#define AL_ETH_TX_GCP_HEAD_SIZE_MASK 0xFF +#define AL_ETH_TX_GCP_HEAD_SIZE_SHIFT 16 +#define AL_ETH_TX_GCP_HEAD_CALC_MASK 0x1 +#define AL_ETH_TX_GCP_HEAD_CALC_SHIFT 24 +#define AL_ETH_TX_GCP_MASK_POLARITY_MASK 0x1 +#define AL_ETH_TX_GCP_MASK_POLARITY_SHIFT 25 + +#define AL_ETH_TX_GCP_OPCODE_1_MASK 0x3F +#define AL_ETH_TX_GCP_OPCODE_1_SHIFT 0 +#define AL_ETH_TX_GCP_OPCODE_2_MASK 0x3F +#define AL_ETH_TX_GCP_OPCODE_2_SHIFT 6 +#define AL_ETH_TX_GCP_OPCODE_3_MASK 0x3F +#define AL_ETH_TX_GCP_OPCODE_3_SHIFT 12 +#define AL_ETH_TX_GCP_OPSEL_1_MASK 0xF +#define AL_ETH_TX_GCP_OPSEL_1_SHIFT 0 +#define AL_ETH_TX_GCP_OPSEL_2_MASK 0xF +#define AL_ETH_TX_GCP_OPSEL_2_SHIFT 4 +#define AL_ETH_TX_GCP_OPSEL_3_MASK 0xF +#define AL_ETH_TX_GCP_OPSEL_3_SHIFT 8 +#define AL_ETH_TX_GCP_OPSEL_4_MASK 0xF +#define AL_ETH_TX_GCP_OPSEL_4_SHIFT 12 + +/* Tx crc_chksum_replace defines */ +#define L4_CHECKSUM_DIS_AND_L3_CHECKSUM_DIS 0x00 +#define L4_CHECKSUM_DIS_AND_L3_CHECKSUM_EN 0x20 +#define L4_CHECKSUM_EN_AND_L3_CHECKSUM_DIS 0x40 +#define L4_CHECKSUM_EN_AND_L3_CHECKSUM_EN 0x60 + +/* rx gpd defines */ +#define AL_ETH_RX_GPD_OUTER_L3_PROTO_MASK 0x1f +#define AL_ETH_RX_GPD_OUTER_L3_PROTO_SHIFT (3 + 0) +#define AL_ETH_RX_GPD_OUTER_L4_PROTO_MASK 0x1f +#define AL_ETH_RX_GPD_OUTER_L4_PROTO_SHIFT (3 + 8) +#define AL_ETH_RX_GPD_INNER_L3_PROTO_MASK 0x1f +#define AL_ETH_RX_GPD_INNER_L3_PROTO_SHIFT (3 + 16) +#define AL_ETH_RX_GPD_INNER_L4_PROTO_MASK 0x1f +#define AL_ETH_RX_GPD_INNER_L4_PROTO_SHIFT (3 + 24) +#define AL_ETH_RX_GPD_OUTER_PARSE_CTRL_MASK 0xFF +#define AL_ETH_RX_GPD_OUTER_PARSE_CTRL_SHIFT 32 +#define AL_ETH_RX_GPD_INNER_PARSE_CTRL_MASK 0xFF +#define AL_ETH_RX_GPD_INNER_PARSE_CTRL_SHIFT 40 +#define AL_ETH_RX_GPD_L3_PRIORITY_MASK 0xFF +#define AL_ETH_RX_GPD_L3_PRIORITY_SHIFT 48 +#define AL_ETH_RX_GPD_L4_DST_PORT_LSB_MASK 0xFF +#define AL_ETH_RX_GPD_L4_DST_PORT_LSB_SHIFT 56 +#define AL_ETH_RX_GPD_CAM_DATA_2_SHIFT 32 +#define AL_ETH_RX_GPD_CAM_MASK_2_SHIFT 32 +#define AL_ETH_RX_GPD_CAM_CTRL_VALID_SHIFT 31 + +#define AL_ETH_RX_GPD_PARSE_RESULT_OUTER_L3_PROTO_IDX_OFFSET (106 + 5) +#define AL_ETH_RX_GPD_PARSE_RESULT_OUTER_L4_PROTO_IDX_OFFSET (106 + 10) 
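+
+/*
+ * Usage note (illustrative, not from the original register spec): in the
+ * PARSE_RESULT offsets of this group, a 106 base addresses the outer-header
+ * half of the RX parser result vector and a 0 base addresses the inner
+ * (tunneled) half. The *_MASK/*_SHIFT pairs in this block are field-width
+ * masks paired with bit offsets, so a field such as the outer L3 protocol
+ * index would be packed as:
+ *
+ *	entry |= ((uint64_t)(l3_proto & AL_ETH_RX_GPD_OUTER_L3_PROTO_MASK))
+ *			<< AL_ETH_RX_GPD_OUTER_L3_PROTO_SHIFT;
+ */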
+#define AL_ETH_RX_GPD_PARSE_RESULT_INNER_L3_PROTO_IDX_OFFSET (0 + 5) +#define AL_ETH_RX_GPD_PARSE_RESULT_INNER_L4_PROTO_IDX_OFFSET (0 + 10) +#define AL_ETH_RX_GPD_PARSE_RESULT_OUTER_PARSE_CTRL (106 + 4) +#define AL_ETH_RX_GPD_PARSE_RESULT_INNER_PARSE_CTRL 4 +#define AL_ETH_RX_GPD_PARSE_RESULT_L3_PRIORITY (106 + 13) +#define AL_ETH_RX_GPD_PARSE_RESULT_OUTER_L4_DST_PORT_LSB (106 + 65) + +/* rx gcp defines */ +#define AL_ETH_RX_GCP_POLY_SEL_MASK 0x1 +#define AL_ETH_RX_GCP_POLY_SEL_SHIFT 0 +#define AL_ETH_RX_GCP_CRC32_BIT_COMP_MASK 0x1 +#define AL_ETH_RX_GCP_CRC32_BIT_COMP_SHIFT 1 +#define AL_ETH_RX_GCP_CRC32_BIT_SWAP_MASK 0x1 +#define AL_ETH_RX_GCP_CRC32_BIT_SWAP_SHIFT 2 +#define AL_ETH_RX_GCP_CRC32_BYTE_SWAP_MASK 0x1 +#define AL_ETH_RX_GCP_CRC32_BYTE_SWAP_SHIFT 3 +#define AL_ETH_RX_GCP_DATA_BIT_SWAP_MASK 0x1 +#define AL_ETH_RX_GCP_DATA_BIT_SWAP_SHIFT 4 +#define AL_ETH_RX_GCP_DATA_BYTE_SWAP_MASK 0x1 +#define AL_ETH_RX_GCP_DATA_BYTE_SWAP_SHIFT 5 +#define AL_ETH_RX_GCP_TRAIL_SIZE_MASK 0xF +#define AL_ETH_RX_GCP_TRAIL_SIZE_SHIFT 6 +#define AL_ETH_RX_GCP_HEAD_SIZE_MASK 0xFF +#define AL_ETH_RX_GCP_HEAD_SIZE_SHIFT 16 +#define AL_ETH_RX_GCP_HEAD_CALC_MASK 0x1 +#define AL_ETH_RX_GCP_HEAD_CALC_SHIFT 24 +#define AL_ETH_RX_GCP_MASK_POLARITY_MASK 0x1 +#define AL_ETH_RX_GCP_MASK_POLARITY_SHIFT 25 + +#define AL_ETH_RX_GCP_OPCODE_1_MASK 0x3F +#define AL_ETH_RX_GCP_OPCODE_1_SHIFT 0 +#define AL_ETH_RX_GCP_OPCODE_2_MASK 0x3F +#define AL_ETH_RX_GCP_OPCODE_2_SHIFT 6 +#define AL_ETH_RX_GCP_OPCODE_3_MASK 0x3F +#define AL_ETH_RX_GCP_OPCODE_3_SHIFT 12 +#define AL_ETH_RX_GCP_OPSEL_1_MASK 0xF +#define AL_ETH_RX_GCP_OPSEL_1_SHIFT 0 +#define AL_ETH_RX_GCP_OPSEL_2_MASK 0xF +#define AL_ETH_RX_GCP_OPSEL_2_SHIFT 4 +#define AL_ETH_RX_GCP_OPSEL_3_MASK 0xF +#define AL_ETH_RX_GCP_OPSEL_3_SHIFT 8 +#define AL_ETH_RX_GCP_OPSEL_4_MASK 0xF +#define AL_ETH_RX_GCP_OPSEL_4_SHIFT 12 + +#define AL_ETH_MDIO_DELAY_PERIOD 1 /* micro seconds to wait when polling mdio status */ +#define AL_ETH_MDIO_DELAY_COUNT 150 /* number of times to poll */ +#define AL_ETH_S2M_UDMA_COMP_COAL_TIMEOUT 200 /* Rx descriptors coalescing timeout in SB clocks */ + +#define AL_ETH_EPE_ENTRIES_NUM 26 +static struct al_eth_epe_p_reg_entry al_eth_epe_p_regs[AL_ETH_EPE_ENTRIES_NUM] = { + { 0x0, 0x0, 0x0 }, + { 0x0, 0x0, 0x1 }, + { 0x0, 0x0, 0x2 }, + { 0x0, 0x0, 0x3 }, + { 0x18100, 0xFFFFF, 0x80000004 }, + { 0x188A8, 0xFFFFF, 0x80000005 }, + { 0x99100, 0xFFFFF, 0x80000006 }, + { 0x98100, 0xFFFFF, 0x80000007 }, + { 0x10800, 0x7FFFF, 0x80000008 }, + { 0x20000, 0x73FFF, 0x80000009 }, + { 0x20000, 0x70000, 0x8000000A }, + { 0x186DD, 0x7FFFF, 0x8000000B }, + { 0x30600, 0x7FF00, 0x8000000C }, + { 0x31100, 0x7FF00, 0x8000000D }, + { 0x32F00, 0x7FF00, 0x8000000E }, + { 0x32900, 0x7FF00, 0x8000000F }, + { 0x105DC, 0x7FFFF, 0x80010010 }, + { 0x18864, 0x7FFFF, 0x80000011 }, + { 0x72000, 0x72000, 0x80000012 }, + { 0x70000, 0x72000, 0x80000013 }, + { 0x46558, 0x7FFFF, 0x80000001 }, + { 0x18906, 0x7FFFF, 0x80000015 }, + { 0x18915, 0x7FFFF, 0x80000016 }, + { 0x31B00, 0x7FF00, 0x80000017 }, + { 0x30400, 0x7FF00, 0x80000018 }, + { 0x0, 0x0, 0x8000001F } +}; + + +static struct al_eth_epe_control_entry al_eth_epe_control_table[AL_ETH_EPE_ENTRIES_NUM] = { + {{ 0x2800000, 0x0, 0x0, 0x0, 0x1, 0x400000 }}, + {{ 0x280004C, 0x746000, 0xA46030, 0xE00000, 0x2, 0x400000 }}, + {{ 0x2800054, 0x746000, 0xA46030, 0x1600000, 0x2, 0x400000 }}, + {{ 0x280005C, 0x746000, 0xA46030, 0x1E00000, 0x2, 0x400000 }}, + {{ 0x2800042, 0xD42000, 0x0, 0x400000, 0x1010412, 0x400000 }}, + {{ 0x2800042, 0xD42000, 0x0, 
0x400000, 0x1010412, 0x400000 }}, + {{ 0x2800042, 0xE42000, 0x0, 0x400000, 0x2020002, 0x400000 }}, + {{ 0x2800042, 0xE42000, 0x0, 0x400000, 0x2020002, 0x400000 }}, + {{ 0x280B046, 0x0, 0x6C1008, 0x0, 0x4, 0x406800 }}, + {{ 0x2800049, 0xF44060, 0x1744080, 0x14404, 0x6, 0x400011 }}, + {{ 0x2015049, 0xF44060, 0x1744080, 0x14404, 0x8080007, 0x400011 }}, + {{ 0x280B046, 0xF60040, 0x6C1004, 0x2800000, 0x6, 0x406811 }}, + {{ 0x2815042, 0x1F42000, 0x2042010, 0x1414460, 0x10100009, 0x40B800 }}, + {{ 0x2815042, 0x1F42000, 0x2042010, 0x800000, 0x10100009, 0x40B800 }}, + {{ 0x280B042, 0x0, 0x0, 0x430400, 0x4040009, 0x0 }}, + {{ 0x2815580, 0x0, 0x0, 0x0, 0x4040005, 0x0 }}, + {{ 0x280B000, 0x0, 0x0, 0x0, 0x1, 0x400000 }}, + {{ 0x280B400, 0x0, 0x0, 0x800000, 0x4040003, 0}}, + {{ 0x280B000, 0x0, 0x0, 0x600000, 0x1, 0x406800 }}, + {{ 0x280B000, 0x0, 0x0, 0xE00000, 0x1, 0x406800 }}, + {{ 0x2800000, 0x0, 0x0, 0x0, 0x1, 0x400000 }}, + {{ 0x280B046, 0x0, 0x0, 0x2800000, 0x7, 0x400000 }}, + {{ 0x280B046, 0xF60040, 0x6C1004, 0x2800000, 0x6, 0x406811 }}, + {{ 0x2815042, 0x1F43028, 0x2000000, 0xC00000, 0x10100009, 0x40B800 }}, + {{ 0x2815400, 0x0, 0x0, 0x0, 0x4040005, 0x0 }}, + {{ 0x2800000, 0x0, 0x0, 0x0, 0x1, 0x400000 }} +}; + + +#define AL_ETH_IS_1G_MAC(mac_mode) (((mac_mode) == AL_ETH_MAC_MODE_RGMII) || ((mac_mode) == AL_ETH_MAC_MODE_SGMII)) +#define AL_ETH_IS_10G_MAC(mac_mode) (((mac_mode) == AL_ETH_MAC_MODE_10GbE_Serial) || \ + ((mac_mode) == AL_ETH_MAC_MODE_10G_SGMII) || \ + ((mac_mode) == AL_ETH_MAC_MODE_SGMII_2_5G)) +#define AL_ETH_IS_25G_MAC(mac_mode) ((mac_mode) == AL_ETH_MAC_MODE_KR_LL_25G) + +static const char *al_eth_mac_mode_str(enum al_eth_mac_mode mode) +{ + switch(mode) { + case AL_ETH_MAC_MODE_RGMII: + return "RGMII"; + case AL_ETH_MAC_MODE_SGMII: + return "SGMII"; + case AL_ETH_MAC_MODE_SGMII_2_5G: + return "SGMII_2_5G"; + case AL_ETH_MAC_MODE_10GbE_Serial: + return "KR"; + case AL_ETH_MAC_MODE_KR_LL_25G: + return "KR_LL_25G"; + case AL_ETH_MAC_MODE_10G_SGMII: + return "10G_SGMII"; + case AL_ETH_MAC_MODE_XLG_LL_40G: + return "40G_LL"; + case AL_ETH_MAC_MODE_XLG_LL_50G: + return "50G_LL"; + default: + return "N/A"; + } +} + +/** + * change and wait udma state + * + * @param dma the udma to change its state + * @param new_state + * + * @return 0 on success. otherwise on failure. 
+ */ +static int al_udma_state_set_wait(struct al_udma *dma, enum al_udma_state new_state) +{ + enum al_udma_state state; + enum al_udma_state expected_state = new_state; + int count = 1000; + int rc; + + rc = al_udma_state_set(dma, new_state); + if (rc != 0) { + al_warn("[%s] warn: failed to change state, error %d\n", dma->name, rc); + return rc; + } + + if ((new_state == UDMA_NORMAL) || (new_state == UDMA_DISABLE)) + expected_state = UDMA_IDLE; + + do { + state = al_udma_state_get(dma); + if (state == expected_state) + break; + al_udelay(1); + if (count-- == 0) { + al_warn("[%s] warn: dma state didn't change to %s\n", + dma->name, al_udma_states_name[new_state]); + return -ETIMEDOUT; + } + } while (1); + return 0; +} + +static void al_eth_epe_entry_set(struct al_hal_eth_adapter *adapter, uint32_t idx, + struct al_eth_epe_p_reg_entry *reg_entry, + struct al_eth_epe_control_entry *control_entry) +{ + al_reg_write32(&adapter->ec_regs_base->epe_p[idx].comp_data, reg_entry->data); + al_reg_write32(&adapter->ec_regs_base->epe_p[idx].comp_mask, reg_entry->mask); + al_reg_write32(&adapter->ec_regs_base->epe_p[idx].comp_ctrl, reg_entry->ctrl); + + al_reg_write32(&adapter->ec_regs_base->msp_c[idx].p_comp_data, reg_entry->data); + al_reg_write32(&adapter->ec_regs_base->msp_c[idx].p_comp_mask, reg_entry->mask); + al_reg_write32(&adapter->ec_regs_base->msp_c[idx].p_comp_ctrl, reg_entry->ctrl); + + /*control table 0*/ + al_reg_write32(&adapter->ec_regs_base->epe[0].act_table_addr, idx); + al_reg_write32(&adapter->ec_regs_base->epe[0].act_table_data_6, + control_entry->data[5]); + al_reg_write32(&adapter->ec_regs_base->epe[0].act_table_data_2, + control_entry->data[1]); + al_reg_write32(&adapter->ec_regs_base->epe[0].act_table_data_3, + control_entry->data[2]); + al_reg_write32(&adapter->ec_regs_base->epe[0].act_table_data_4, + control_entry->data[3]); + al_reg_write32(&adapter->ec_regs_base->epe[0].act_table_data_5, + control_entry->data[4]); + al_reg_write32(&adapter->ec_regs_base->epe[0].act_table_data_1, + control_entry->data[0]); + + /*control table 1*/ + al_reg_write32(&adapter->ec_regs_base->epe[1].act_table_addr, idx); + al_reg_write32(&adapter->ec_regs_base->epe[1].act_table_data_6, + control_entry->data[5]); + al_reg_write32(&adapter->ec_regs_base->epe[1].act_table_data_2, + control_entry->data[1]); + al_reg_write32(&adapter->ec_regs_base->epe[1].act_table_data_3, + control_entry->data[2]); + al_reg_write32(&adapter->ec_regs_base->epe[1].act_table_data_4, + control_entry->data[3]); + al_reg_write32(&adapter->ec_regs_base->epe[1].act_table_data_5, + control_entry->data[4]); + al_reg_write32(&adapter->ec_regs_base->epe[1].act_table_data_1, + control_entry->data[0]); + +#ifdef AL_ETH_SUPPORT_MACSEC + /* macsec parser */ + al_reg_write32(&adapter->ec_regs_base->msp.p_act_table_addr, idx); + al_reg_write32(&adapter->ec_regs_base->msp.p_act_table_data_6, + control_entry->data[5]); + al_reg_write32(&adapter->ec_regs_base->msp.p_act_table_data_2, + control_entry->data[1]); + al_reg_write32(&adapter->ec_regs_base->msp.p_act_table_data_3, + control_entry->data[2]); + al_reg_write32(&adapter->ec_regs_base->msp.p_act_table_data_4, + control_entry->data[3]); + al_reg_write32(&adapter->ec_regs_base->msp.p_act_table_data_5, + control_entry->data[4]); + al_reg_write32(&adapter->ec_regs_base->msp.p_act_table_data_1, + control_entry->data[0]); +#endif +} + +static void al_eth_epe_init(struct al_hal_eth_adapter *adapter) +{ + int idx; + + if (adapter->enable_rx_parser == 0) { + al_dbg("eth [%s]: disable rx 
parser\n", adapter->name); + + al_reg_write32(&adapter->ec_regs_base->epe[0].res_def, 0x08000000); + al_reg_write32(&adapter->ec_regs_base->epe[0].res_in, 0x7); + + al_reg_write32(&adapter->ec_regs_base->epe[1].res_def, 0x08000000); + al_reg_write32(&adapter->ec_regs_base->epe[1].res_in, 0x7); + +#ifdef AL_ETH_SUPPORT_MACSEC + al_reg_write32(&adapter->ec_regs_base->msp.p_res_def, 0x08000080); + al_reg_write32(&adapter->ec_regs_base->msp.p_res_in, 0x7); +#endif + + return; + } + al_dbg("eth [%s]: enable rx parser\n", adapter->name); + for (idx = 0; idx < AL_ETH_EPE_ENTRIES_NUM; idx++) + al_eth_epe_entry_set(adapter, idx, &al_eth_epe_p_regs[idx], &al_eth_epe_control_table[idx]); + + al_reg_write32(&adapter->ec_regs_base->epe[0].res_def, 0x08000080); + al_reg_write32(&adapter->ec_regs_base->epe[0].res_in, 0x7); + + al_reg_write32(&adapter->ec_regs_base->epe[1].res_def, 0x08000080); + al_reg_write32(&adapter->ec_regs_base->epe[1].res_in, 0); + +#ifdef AL_ETH_SUPPORT_MACSEC + al_reg_write32(&adapter->ec_regs_base->msp.p_res_def, 0x08000080); + al_reg_write32(&adapter->ec_regs_base->msp.p_res_in, 0x7); +#endif + + /* header length as function of 4 bits value, for GRE, when C bit is set, the header len should be increase by 4*/ + al_reg_write32(&adapter->ec_regs_base->epe_h[8].hdr_len, (4 << 16) | 4); + + /* select the outer information when writing the rx descriptor (l3 protocol index etc) */ + al_reg_write32(&adapter->ec_regs_base->rfw.meta, EC_RFW_META_L3_LEN_CALC); + + al_reg_write32(&adapter->ec_regs_base->rfw.checksum, EC_RFW_CHECKSUM_HDR_SEL); +} + +/** + * read 40G MAC registers (indirect access) + * + * @param adapter pointer to the private structure + * @param reg_addr address in the an registers + * + * @return the register value + */ +static uint32_t al_eth_40g_mac_reg_read( + struct al_hal_eth_adapter *adapter, + uint32_t reg_addr) +{ + uint32_t val; + + /* indirect access */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_40g_ll_addr, reg_addr); + val = al_reg_read32(&adapter->mac_regs_base->gen_v3.mac_40g_ll_data); + + al_dbg("[%s]: %s - reg %d. val 0x%x", + adapter->name, __func__, reg_addr, val); + + return val; +} + +/** + * write 40G MAC registers (indirect access) + * + * @param adapter pointer to the private structure + * @param reg_addr address in the an registers + * @param reg_data value to write to the register + * + */ +static void al_eth_40g_mac_reg_write( + struct al_hal_eth_adapter *adapter, + uint32_t reg_addr, + uint32_t reg_data) +{ + /* indirect access */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_40g_ll_addr, reg_addr); + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_40g_ll_data, reg_data); + + al_dbg("[%s]: %s - reg %d. val 0x%x", + adapter->name, __func__, reg_addr, reg_data); +} + +/** + * read 40G PCS registers (indirect access) + * + * @param adapter pointer to the private structure + * @param reg_addr address in the an registers + * + * @return the register value + */ +static uint32_t al_eth_40g_pcs_reg_read( + struct al_hal_eth_adapter *adapter, + uint32_t reg_addr) +{ + uint32_t val; + + /* indirect access */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.pcs_40g_ll_addr, reg_addr); + val = al_reg_read32(&adapter->mac_regs_base->gen_v3.pcs_40g_ll_data); + + al_dbg("[%s]: %s - reg %d. 
val 0x%x", + adapter->name, __func__, reg_addr, val); + + return val; +} + +/** + * write 40G PCS registers (indirect access) + * + * @param adapter pointer to the private structure + * @param reg_addr address in the an registers + * @param reg_data value to write to the register + * + */ +void al_eth_40g_pcs_reg_write( + struct al_hal_eth_adapter *adapter, + uint32_t reg_addr, + uint32_t reg_data) +{ + /* indirect access */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.pcs_40g_ll_addr, reg_addr); + al_reg_write32(&adapter->mac_regs_base->gen_v3.pcs_40g_ll_data, reg_data); + + al_dbg("[%s]: %s - reg %d. val 0x%x", + adapter->name, __func__, reg_addr, reg_data); +} + +/*****************************API Functions **********************************/ +/*adapter management */ +/** + * initialize the ethernet adapter's DMA + */ +int al_eth_adapter_init(struct al_hal_eth_adapter *adapter, struct al_eth_adapter_params *params) +{ + struct al_udma_params udma_params; + struct al_udma_m2s_pkt_len_conf conf; + int i; + uint32_t reg; + int rc; + + al_dbg("eth [%s]: initialize controller's UDMA. id = %d\n", params->name, params->udma_id); + al_dbg("eth [%s]: UDMA base regs: %p\n", params->name, params->udma_regs_base); + al_dbg("eth [%s]: EC base regs: %p\n", params->name, params->ec_regs_base); + al_dbg("eth [%s]: MAC base regs: %p\n", params->name, params->mac_regs_base); + al_dbg("eth [%s]: enable_rx_parser: %x\n", params->name, params->enable_rx_parser); + + adapter->name = params->name; + adapter->dev_id = params->dev_id; + adapter->rev_id = params->rev_id; + adapter->udma_id = params->udma_id; + adapter->udma_regs_base = params->udma_regs_base; + adapter->ec_regs_base = (struct al_ec_regs __iomem*)params->ec_regs_base; + adapter->mac_regs_base = (struct al_eth_mac_regs __iomem*)params->mac_regs_base; + adapter->unit_regs = (struct unit_regs __iomem *)params->udma_regs_base; + adapter->enable_rx_parser = params->enable_rx_parser; + adapter->ec_ints_base = ((void __iomem *)adapter->ec_regs_base) + 0x1c00; + adapter->mac_ints_base = ((void __iomem *)adapter->mac_regs_base) + 0x800; + + /* initialize Tx udma */ + udma_params.udma_reg = (union udma_regs __iomem *)&adapter->unit_regs->m2s; + udma_params.type = UDMA_TX; + udma_params.num_of_queues = AL_ETH_UDMA_TX_QUEUES; + udma_params.name = "eth tx"; + rc = al_udma_init(&adapter->tx_udma, &udma_params); + + if (rc != 0) { + al_err("failed to initialize %s, error %d\n", + udma_params.name, rc); + return rc; + } + rc = al_udma_state_set_wait(&adapter->tx_udma, UDMA_NORMAL); + if (rc != 0) { + al_err("[%s]: failed to change state, error %d\n", + udma_params.name, rc); + return rc; + } + /* initialize Rx udma */ + udma_params.udma_reg = (union udma_regs __iomem *)&adapter->unit_regs->s2m; + udma_params.type = UDMA_RX; + udma_params.num_of_queues = AL_ETH_UDMA_RX_QUEUES; + udma_params.name = "eth rx"; + rc = al_udma_init(&adapter->rx_udma, &udma_params); + + if (rc != 0) { + al_err("failed to initialize %s, error %d\n", + udma_params.name, rc); + return rc; + } + + rc = al_udma_state_set_wait(&adapter->rx_udma, UDMA_NORMAL); + if (rc != 0) { + al_err("[%s]: failed to change state, error %d\n", + udma_params.name, rc); + return rc; + } + al_dbg("eth [%s]: controller's UDMA successfully initialized\n", + params->name); + + /* set max packet size to 1M (for TSO) */ + conf.encode_64k_as_zero = AL_TRUE; + conf.max_pkt_size = 0xfffff; + al_udma_m2s_packet_size_cfg_set(&adapter->tx_udma, &conf); + + /* set m2s (tx) max descriptors to max data buffers number and 
+	 * meta descriptor
+	 */
+	al_udma_m2s_max_descs_set(&adapter->tx_udma, AL_ETH_PKT_MAX_BUFS + 1);
+
+	/* set s2m (rx) max descriptors to max data buffers */
+	al_udma_s2m_max_descs_set(&adapter->rx_udma, AL_ETH_PKT_MAX_BUFS);
+
+	/* set s2m burst length when writing completion descriptors to 64 bytes
+	 */
+	al_udma_s2m_compl_desc_burst_config(&adapter->rx_udma, 64);
+
+	/* if a pointer to the ec regs is provided, init the tx meta cache of this udma */
+	if (adapter->ec_regs_base != NULL) {
+		/* init the TX cache table */
+		for (i = 0; i < 4; i++) {
+			al_reg_write32(&adapter->ec_regs_base->tso.cache_table_addr, i + (adapter->udma_id * 4));
+			al_reg_write32(&adapter->ec_regs_base->tso.cache_table_data_1, 0x00000000);
+			al_reg_write32(&adapter->ec_regs_base->tso.cache_table_data_2, 0x00000000);
+			al_reg_write32(&adapter->ec_regs_base->tso.cache_table_data_3, 0x00000000);
+			al_reg_write32(&adapter->ec_regs_base->tso.cache_table_data_4, 0x00000000);
+		}
+	}
+	/* only UDMA 0 is allowed to init the EC */
+	if (adapter->udma_id != 0) {
+		return 0;
+	}
+	/* enable Ethernet controller: */
+	/* enable internal machines */
+	al_reg_write32(&adapter->ec_regs_base->gen.en, 0xffffffff);
+	al_reg_write32(&adapter->ec_regs_base->gen.fifo_en, 0xffffffff);
+
+	if (adapter->rev_id > AL_ETH_REV_ID_0) {
+		/* enable A0 descriptor structure */
+		al_reg_write32_masked(&adapter->ec_regs_base->gen.en_ext,
+				EC_GEN_EN_EXT_CACHE_WORD_SPLIT,
+				EC_GEN_EN_EXT_CACHE_WORD_SPLIT);
+
+		/* use mss value in the descriptor */
+		al_reg_write32(&adapter->ec_regs_base->tso.cfg_add_0,
+				EC_TSO_CFG_ADD_0_MSS_SEL);
+
+		/* enable tunnel TSO */
+		al_reg_write32(&adapter->ec_regs_base->tso.cfg_tunnel,
+				(EC_TSO_CFG_TUNNEL_EN_TUNNEL_TSO |
+				 EC_TSO_CFG_TUNNEL_EN_UDP_CHKSUM |
+				 EC_TSO_CFG_TUNNEL_EN_UDP_LEN |
+				 EC_TSO_CFG_TUNNEL_EN_IPV6_PLEN |
+				 EC_TSO_CFG_TUNNEL_EN_IPV4_CHKSUM |
+				 EC_TSO_CFG_TUNNEL_EN_IPV4_IDEN |
+				 EC_TSO_CFG_TUNNEL_EN_IPV4_TLEN));
+	}
+
+	/* swap input bytes from MAC RX */
+	al_reg_write32(&adapter->ec_regs_base->mac.gen, 0x00000001);
+	/* swap output bytes to MAC TX */
+	al_reg_write32(&adapter->ec_regs_base->tmi.tx_cfg, EC_TMI_TX_CFG_EN_FWD_TO_RX|EC_TMI_TX_CFG_SWAP_BYTES);
+
+	/* TODO: check if we need this line */
+	al_reg_write32(&adapter->ec_regs_base->tfw_udma[0].fwd_dec, 0x000003fb);
+
+	/* RFW configuration: default 0 */
+	al_reg_write32(&adapter->ec_regs_base->rfw_default[0].opt_1, 0x00000001);
+
+	/* VLAN table address */
+	al_reg_write32(&adapter->ec_regs_base->rfw.vid_table_addr, 0x00000000);
+	/* VLAN table data */
+	al_reg_write32(&adapter->ec_regs_base->rfw.vid_table_data, 0x00000000);
+	/* HASH config (select Toeplitz and bits 7:0 of the thash result, enable
+	 * symmetric hash) */
+	al_reg_write32(&adapter->ec_regs_base->rfw.thash_cfg_1,
+			EC_RFW_THASH_CFG_1_ENABLE_IP_SWAP |
+			EC_RFW_THASH_CFG_1_ENABLE_PORT_SWAP);
+
+	al_eth_epe_init(adapter);
+
+	/* disable TSO padding and use mac padding instead */
+	reg = al_reg_read32(&adapter->ec_regs_base->tso.in_cfg);
+	reg &= ~0x7F00; /* clear bits 14:8 */
+	al_reg_write32(&adapter->ec_regs_base->tso.in_cfg, reg);
+
+	return 0;
+}
+
+/*****************************API Functions **********************************/
+/* adapter management */
+/**
+ * enable the ec and mac interrupts
+ */
+int al_eth_ec_mac_ints_config(struct al_hal_eth_adapter *adapter)
+{
+	al_dbg("eth [%s]: enable ethernet and mac interrupts\n", adapter->name);
+
+	/* only UDMA 0 is allowed to access the EC */
+	if (adapter->udma_id != 0)
+		return -EPERM;
+
+	/* enable EC interrupts */
+	al_iofic_config(adapter->ec_ints_base, AL_INT_GROUP_A,
+			INT_CONTROL_GRP_SET_ON_POSEDGE | INT_CONTROL_GRP_CLEAR_ON_READ);
+	al_iofic_config(adapter->ec_ints_base, AL_INT_GROUP_B,
+			INT_CONTROL_GRP_SET_ON_POSEDGE | INT_CONTROL_GRP_CLEAR_ON_READ);
+	al_iofic_config(adapter->ec_ints_base, AL_INT_GROUP_C,
+			INT_CONTROL_GRP_SET_ON_POSEDGE | INT_CONTROL_GRP_CLEAR_ON_READ);
+	al_iofic_config(adapter->ec_ints_base, AL_INT_GROUP_D,
+			INT_CONTROL_GRP_SET_ON_POSEDGE | INT_CONTROL_GRP_CLEAR_ON_READ);
+
+	/* unmask MAC int */
+	al_iofic_unmask(adapter->ec_ints_base, AL_INT_GROUP_A, 8);
+
+	/* enable MAC interrupts */
+	al_iofic_config(adapter->mac_ints_base, AL_INT_GROUP_A,
+			INT_CONTROL_GRP_SET_ON_POSEDGE | INT_CONTROL_GRP_CLEAR_ON_READ);
+	al_iofic_config(adapter->mac_ints_base, AL_INT_GROUP_B,
+			INT_CONTROL_GRP_SET_ON_POSEDGE | INT_CONTROL_GRP_CLEAR_ON_READ);
+	al_iofic_config(adapter->mac_ints_base, AL_INT_GROUP_C,
+			INT_CONTROL_GRP_SET_ON_POSEDGE | INT_CONTROL_GRP_CLEAR_ON_READ);
+	al_iofic_config(adapter->mac_ints_base, AL_INT_GROUP_D,
+			INT_CONTROL_GRP_SET_ON_POSEDGE | INT_CONTROL_GRP_CLEAR_ON_READ);
+
+	/* EEE active */
+	al_iofic_unmask(adapter->mac_ints_base, AL_INT_GROUP_B, AL_BIT(14));
+
+	al_iofic_unmask(adapter->unit_regs, AL_INT_GROUP_D, AL_BIT(11));
+	return 0;
+}
+
+/**
+ * ec and mac interrupt service routine
+ * read and print asserted interrupts
+ *
+ * @param adapter pointer to the private structure
+ *
+ * @return 0 on success, error code otherwise.
+ */
+int al_eth_ec_mac_isr(struct al_hal_eth_adapter *adapter)
+{
+	uint32_t cause;
+
+	al_dbg("[%s]: ethernet interrupts handler\n", adapter->name);
+
+	/* only UDMA 0 is allowed to access the EC */
+	if (adapter->udma_id != 0)
+		return -EPERM;
+
+	/* read ec cause */
+	cause = al_iofic_read_cause(adapter->ec_ints_base, AL_INT_GROUP_A);
+	al_dbg("[%s]: ethernet group A cause 0x%08x\n", adapter->name, cause);
+	if (cause & 1) {
+		cause = al_iofic_read_cause(adapter->mac_ints_base, AL_INT_GROUP_A);
+		al_dbg("[%s]: mac group A cause 0x%08x\n", adapter->name, cause);
+
+		cause = al_iofic_read_cause(adapter->mac_ints_base, AL_INT_GROUP_B);
+		al_dbg("[%s]: mac group B cause 0x%08x\n", adapter->name, cause);
+
+		cause = al_iofic_read_cause(adapter->mac_ints_base, AL_INT_GROUP_C);
+		al_dbg("[%s]: mac group C cause 0x%08x\n", adapter->name, cause);
+
+		cause = al_iofic_read_cause(adapter->mac_ints_base, AL_INT_GROUP_D);
+		al_dbg("[%s]: mac group D cause 0x%08x\n", adapter->name, cause);
+	}
+	cause = al_iofic_read_cause(adapter->ec_ints_base, AL_INT_GROUP_B);
+	al_dbg("[%s]: ethernet group B cause 0x%08x\n", adapter->name, cause);
+	cause = al_iofic_read_cause(adapter->ec_ints_base, AL_INT_GROUP_C);
+	al_dbg("[%s]: ethernet group C cause 0x%08x\n", adapter->name, cause);
+	cause = al_iofic_read_cause(adapter->ec_ints_base, AL_INT_GROUP_D);
+	al_dbg("[%s]: ethernet group D cause 0x%08x\n", adapter->name, cause);
+
+	return 0;
+}
+
+/**
+ * stop the DMA of the ethernet adapter
+ */
+int al_eth_adapter_stop(struct al_hal_eth_adapter *adapter)
+{
+	int rc;
+
+	al_dbg("eth [%s]: stop controller's UDMA\n", adapter->name);
+
+	/* disable Tx dma */
+	rc = al_udma_state_set_wait(&adapter->tx_udma, UDMA_DISABLE);
+	if (rc != 0) {
+		al_warn("[%s] warn: failed to change state, error %d\n",
+			adapter->tx_udma.name, rc);
+		return rc;
+	}
+
+	al_dbg("eth [%s]: controller's TX UDMA stopped\n",
+		adapter->name);
+	/* disable Rx dma */
+	rc = al_udma_state_set_wait(&adapter->rx_udma, UDMA_DISABLE);
+	if (rc != 0) {
+		al_warn("[%s] warn: failed to change state, error %d\n",
+			adapter->rx_udma.name, rc);
+		return rc;
+	}
+
+	al_dbg("eth [%s]: controller's RX UDMA stopped\n",
+		adapter->name);
+	return 0;
+}
+
+int al_eth_adapter_reset(struct al_hal_eth_adapter *adapter)
+{
+	al_dbg("eth [%s]: reset controller's UDMA\n", adapter->name);
+
+	/* adapter reset is not implemented */
+	return -EPERM;
+}
+
+/* Q management */
+/**
+ * Configure and enable a queue ring
+ */
+int al_eth_queue_config(struct al_hal_eth_adapter *adapter, enum al_udma_type type, uint32_t qid,
+			struct al_udma_q_params *q_params)
+{
+	struct al_udma *udma;
+	int rc;
+
+	al_dbg("eth [%s]: config UDMA %s queue %d\n", adapter->name,
+		type == UDMA_TX ? "Tx" : "Rx", qid);
+
+	if (type == UDMA_TX) {
+		udma = &adapter->tx_udma;
+	} else {
+		udma = &adapter->rx_udma;
+	}
+
+	q_params->dev_id = adapter->dev_id;
+	q_params->rev_id = adapter->rev_id;
+
+	rc = al_udma_q_init(udma, qid, q_params);
+	if (rc)
+		return rc;
+
+	if (type == UDMA_RX)
+		rc = al_udma_s2m_q_compl_coal_config(&udma->udma_q[qid],
+				AL_TRUE, AL_ETH_S2M_UDMA_COMP_COAL_TIMEOUT);
+	return rc;
+}
+
+int al_eth_queue_enable(struct al_hal_eth_adapter *adapter,
+			enum al_udma_type type,
+			uint32_t qid)
+{
+	struct al_udma *udma;
+	int rc;
+
+	al_dbg("eth [%s]: enable UDMA %s queue %d\n", adapter->name,
+		type == UDMA_TX ? "Tx" : "Rx", qid);
+
+	if (type == UDMA_TX) {
+		udma = &adapter->tx_udma;
+	} else {
+		udma = &adapter->rx_udma;
+	}
+
+	rc = al_udma_q_enable(&udma->udma_q[qid], AL_TRUE);
+
+	return rc;
+}
+int al_eth_queue_disable(struct al_hal_eth_adapter *adapter,
+			 enum al_udma_type type,
+			 uint32_t qid)
+{
+	struct al_udma *udma;
+	int rc;
+
+	al_dbg("eth [%s]: disable UDMA %s queue %d\n", adapter->name,
+		type == UDMA_TX ? "Tx" : "Rx", qid);
+
+	if (type == UDMA_TX) {
+		udma = &adapter->tx_udma;
+	} else {
+		udma = &adapter->rx_udma;
+	}
+
+	rc = al_udma_q_enable(&udma->udma_q[qid], AL_FALSE);
+	if (rc)
+		return rc;
+
+	rc = al_udma_q_reset(&udma->udma_q[qid]);
+
+	return rc;
+}
+
+/* MAC layer */
+int al_eth_rx_pkt_limit_config(struct al_hal_eth_adapter *adapter, uint32_t min_rx_len, uint32_t max_rx_len)
+{
+	al_assert(max_rx_len <= AL_ETH_MAX_FRAME_LEN);
+
+	/* EC minimum packet length [bytes] in RX */
+	al_reg_write32(&adapter->ec_regs_base->mac.min_pkt, min_rx_len);
+	/* EC maximum packet length [bytes] in RX */
+	al_reg_write32(&adapter->ec_regs_base->mac.max_pkt, max_rx_len);
+
+	if (adapter->rev_id > AL_ETH_REV_ID_2) {
+		al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_1, min_rx_len);
+		al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_2, max_rx_len);
+	}
+
+	/* configure the MAC's max rx length, add 16 bytes so the packet gets
+	 * trimmed by the EC/async FIFO rather than by the MAC
+	 */
+	if (AL_ETH_IS_1G_MAC(adapter->mac_mode))
+		al_reg_write32(&adapter->mac_regs_base->mac_1g.frm_len, max_rx_len + 16);
+	else if (AL_ETH_IS_10G_MAC(adapter->mac_mode) || AL_ETH_IS_25G_MAC(adapter->mac_mode))
+		/* 10G MAC control register */
+		al_reg_write32(&adapter->mac_regs_base->mac_10g.frm_len, (max_rx_len + 16));
+	else
+		al_eth_40g_mac_reg_write(adapter, ETH_MAC_GEN_V3_MAC_40G_FRM_LENGTH_ADDR, (max_rx_len + 16));
+
+	return 0;
+}
+
+/* configure the MAC media type */
+int al_eth_mac_config(struct al_hal_eth_adapter *adapter, enum al_eth_mac_mode mode)
+{
+	switch (mode) {
+	case AL_ETH_MAC_MODE_RGMII:
+		al_reg_write32(&adapter->mac_regs_base->gen.clk_cfg, 0x40003210);
+
+		/* 1G MAC control register */
+		/* bit[0] - TX_ENA - zeroed by default. Should be asserted by al_eth_mac_start
+		 * bit[1] - RX_ENA - zeroed by default.
Should be asserted by al_eth_mac_start + * bit[3] - ETH_SPEED - zeroed to enable 10/100 Mbps Ethernet + * bit[4] - PROMIS_EN - asserted to enable MAC promiscuous mode + * bit[23] - CNTL_FRM-ENA - asserted to enable control frames + * bit[24] - NO_LGTH_CHECK - asserted to disable length checks, which is done in the controller + */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.cmd_cfg, 0x01800010); + + /* RX_SECTION_EMPTY, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.rx_section_empty, 0x00000000); + /* RX_SECTION_FULL, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.rx_section_full, 0x0000000c); /* must be larger than almost empty */ + /* RX_ALMOST_EMPTY, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.rx_almost_empty, 0x00000008); + /* RX_ALMOST_FULL, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.rx_almost_full, 0x00000008); + + + /* TX_SECTION_EMPTY, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.tx_section_empty, 0x00000008); /* 8 ? */ + /* TX_SECTION_FULL, 0 - store and forward, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.tx_section_full, 0x0000000c); + /* TX_ALMOST_EMPTY, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.tx_almost_empty, 0x00000008); + /* TX_ALMOST_FULL, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.tx_almost_full, 0x00000008); + + /* XAUI MAC control register */ + al_reg_write32(&adapter->mac_regs_base->gen.cfg, 0x00000000); + + /* 1G MACSET 1G */ + /* taking sel_1000/sel_10 inputs from rgmii PHY, and not from register. + * disabling magic_packets detection in mac */ + al_reg_write32(&adapter->mac_regs_base->gen.mac_1g_cfg, 0x00000002); + /* RGMII set 1G */ + al_reg_write32_masked(&adapter->mac_regs_base->gen.mux_sel, ~ETH_MAC_GEN_MUX_SEL_KR_IN_MASK, 0x00063910); + al_reg_write32(&adapter->mac_regs_base->gen.rgmii_sel, 0xF); + break; + case AL_ETH_MAC_MODE_SGMII: + if (adapter->rev_id > AL_ETH_REV_ID_2) { + /* configure and enable the ASYNC FIFO between the MACs and the EC */ + /* TX min packet size */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_1, 0x00000037); + /* TX max packet size */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_2, 0x00002800); + /* TX input bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_3, 0x00000080); + /* TX output bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_4, 0x00030020); + /* TX Valid/ready configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_5, 0x00000121); + /* RX min packet size */ + /* al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_1, 0x00000040); */ + /* RX max packet size */ + /* al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_2, 0x00002800); */ + /* RX input bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_3, 0x00030020); + /* RX output bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_4, 0x00000080); + /* RX Valid/ready configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_5, 0x00000212); + /* V3 additional MAC selection */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_sel, 0x00000000); + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_10g_ll_cfg, 0x00000001); + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_10g_ll_ctrl, 0x00000000); + al_reg_write32(&adapter->mac_regs_base->gen_v3.pcs_10g_ll_cfg, 0x00000000); + /* ASYNC FIFO ENABLE */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.afifo_ctrl, 0x00003333); + } + + 
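+
+	/*
+	 * Note: the RX min/max packet size AFIFO registers (rx_afifo_cfg_1/2)
+	 * are left commented out above; on MAC revisions above 2 they are
+	 * programmed later with the caller's RX length limits by
+	 * al_eth_rx_pkt_limit_config().
+	 */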
al_reg_write32(&adapter->mac_regs_base->gen.clk_cfg, 0x40053210); + + /* 1G MAC control register */ + /* bit[0] - TX_ENA - zeroed by default. Should be asserted by al_eth_mac_start + * bit[1] - RX_ENA - zeroed by default. Should be asserted by al_eth_mac_start + * bit[3] - ETH_SPEED - zeroed to enable 10/100 Mbps Ethernet + * bit[4] - PROMIS_EN - asserted to enable MAC promiscuous mode + * bit[23] - CNTL_FRM-ENA - asserted to enable control frames + * bit[24] - NO_LGTH_CHECK - asserted to disable length checks, which is done in the controller + */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.cmd_cfg, 0x01800010); + + /* RX_SECTION_EMPTY, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.rx_section_empty, 0x00000000); + /* RX_SECTION_FULL, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.rx_section_full, 0x0000000c); /* must be larger than almost empty */ + /* RX_ALMOST_EMPTY, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.rx_almost_empty, 0x00000008); + /* RX_ALMOST_FULL, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.rx_almost_full, 0x00000008); + + + /* TX_SECTION_EMPTY, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.tx_section_empty, 0x00000008); /* 8 ? */ + /* TX_SECTION_FULL, 0 - store and forward, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.tx_section_full, 0x0000000c); + /* TX_ALMOST_EMPTY, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.tx_almost_empty, 0x00000008); + /* TX_ALMOST_FULL, */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.tx_almost_full, 0x00000008); + + /* XAUI MAC control register */ + al_reg_write32(&adapter->mac_regs_base->gen.cfg, 0x000000c0); + + /* 1G MACSET 1G */ + /* taking sel_1000/sel_10 inputs from rgmii_converter, and not from register. + * disabling magic_packets detection in mac */ + al_reg_write32(&adapter->mac_regs_base->gen.mac_1g_cfg, 0x00000002); + /* SerDes configuration */ + al_reg_write32_masked(&adapter->mac_regs_base->gen.mux_sel, ~ETH_MAC_GEN_MUX_SEL_KR_IN_MASK, 0x00063910); + al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x000004f0); + al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x00000401); + + // FAST AN -- Testing only +#ifdef AL_HAL_ETH_FAST_AN + al_reg_write32(&adapter->mac_regs_base->sgmii.reg_addr, 0x00000012); + al_reg_write32(&adapter->mac_regs_base->sgmii.reg_data, 0x00000040); + al_reg_write32(&adapter->mac_regs_base->sgmii.reg_addr, 0x00000013); + al_reg_write32(&adapter->mac_regs_base->sgmii.reg_data, 0x00000000); +#endif + + /* Setting PCS i/f mode to SGMII (instead of default 1000Base-X) */ + al_reg_write32(&adapter->mac_regs_base->sgmii.reg_addr, 0x00000014); + al_reg_write32(&adapter->mac_regs_base->sgmii.reg_data, 0x0000000b); + al_reg_write32_masked(&adapter->mac_regs_base->gen.led_cfg, + ETH_MAC_GEN_LED_CFG_SEL_MASK, + ETH_MAC_GEN_LED_CFG_SEL_DEFAULT_REG); + break; + + case AL_ETH_MAC_MODE_SGMII_2_5G: + if (adapter->rev_id > AL_ETH_REV_ID_2) { + /* configure and enable the ASYNC FIFO between the MACs and the EC */ + /* TX min packet size */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_1, 0x00000037); + /* TX max packet size */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_2, 0x00002800); + /* TX input bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_3, 0x00000080); + /* TX output bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_4, 0x00030020); + /* TX Valid/ready configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_5, 0x00000023); + 
/* RX input bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_3, 0x00030020); + /* RX output bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_4, 0x00000080); + /* RX Valid/ready configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_5, 0x00000012); + /* V3 additional MAC selection */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_sel, 0x00000000); + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_10g_ll_cfg, 0x00000000); + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_10g_ll_ctrl, 0x00000000); + al_reg_write32(&adapter->mac_regs_base->gen_v3.pcs_10g_ll_cfg, 0x00000050); + /* ASYNC FIFO ENABLE */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.afifo_ctrl, 0x00003333); + } + + /* MAC register file */ + al_reg_write32(&adapter->mac_regs_base->mac_10g.cmd_cfg, 0x01022830); + /* XAUI MAC control register */ + al_reg_write32(&adapter->mac_regs_base->gen.cfg, 0x00000001); + al_reg_write32(&adapter->mac_regs_base->mac_10g.if_mode, 0x00000028); + al_reg_write32(&adapter->mac_regs_base->mac_10g.control, 0x00001140); + /* RXAUI MAC control register */ + al_reg_write32(&adapter->mac_regs_base->gen.xgmii_dfifo_32_64, 0x00000401); +/* al_reg_write32(&adapter->mac_regs_base->gen.mac_res_1_out, 0x00000401); */ + al_reg_write32(&adapter->mac_regs_base->gen.xgmii_dfifo_64_32, 0x00000401); +/* al_reg_write32(&adapter->mac_regs_base->gen.mac_res_1_in, 0x00000401); */ + al_reg_write32_masked(&adapter->mac_regs_base->gen.mux_sel, + ~ETH_MAC_GEN_MUX_SEL_KR_IN_MASK, 0x00063910); + al_reg_write32(&adapter->mac_regs_base->gen.clk_cfg, 0x40003210); + al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x000004f0); + al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x00000401); + + al_reg_write32_masked(&adapter->mac_regs_base->gen.led_cfg, + ETH_MAC_GEN_LED_CFG_SEL_MASK, + ETH_MAC_GEN_LED_CFG_SEL_DEFAULT_REG); + break; + + case AL_ETH_MAC_MODE_10GbE_Serial: + if (adapter->rev_id > AL_ETH_REV_ID_2) { + /* configure and enable the ASYNC FIFO between the MACs and the EC */ + /* TX min packet size */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_1, 0x00000037); + /* TX max packet size */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_2, 0x00002800); + /* TX input bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_3, 0x00000080); + /* TX output bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_4, 0x00030020); + /* TX Valid/ready configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_5, 0x00000023); + /* RX min packet size */ + /* al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_1, 0x00000040); */ + /* RX max packet size */ + /* al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_2, 0x00002800); */ + /* RX input bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_3, 0x00030020); + /* RX output bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_4, 0x00000080); + /* RX Valid/ready configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_5, 0x00000012); + /* V3 additional MAC selection */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_sel, 0x00000000); + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_10g_ll_cfg, 0x00000000); + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_10g_ll_ctrl, 0x00000000); + al_reg_write32(&adapter->mac_regs_base->gen_v3.pcs_10g_ll_cfg, 
0x00000050); + /* ASYNC FIFO ENABLE */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.afifo_ctrl, 0x00003333); + } + + /* MAC register file */ + al_reg_write32(&adapter->mac_regs_base->mac_10g.cmd_cfg, 0x01022810); + /* XAUI MAC control register */ + al_reg_write32(&adapter->mac_regs_base->gen.cfg, 0x00000005); + /* RXAUI MAC control register */ + al_reg_write32(&adapter->mac_regs_base->gen.rxaui_cfg, 0x00000007); + al_reg_write32(&adapter->mac_regs_base->gen.sd_cfg, 0x000001F1); + al_reg_write32(&adapter->mac_regs_base->gen.xgmii_dfifo_32_64, 0x00000401); +/* al_reg_write32(&adapter->mac_regs_base->gen.mac_res_1_out, 0x00000401); */ + al_reg_write32(&adapter->mac_regs_base->gen.xgmii_dfifo_64_32, 0x00000401); +/* al_reg_write32(&adapter->mac_regs_base->gen.mac_res_1_in, 0x00000401); */ + al_reg_write32_masked(&adapter->mac_regs_base->gen.mux_sel, ~ETH_MAC_GEN_MUX_SEL_KR_IN_MASK, 0x00073910); + al_reg_write32(&adapter->mac_regs_base->gen.clk_cfg, 0x10003210); + al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x000004f0); + al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x00000401); + + al_reg_write32_masked(&adapter->mac_regs_base->gen.led_cfg, + ETH_MAC_GEN_LED_CFG_SEL_MASK, + ETH_MAC_GEN_LED_CFG_SEL_DEFAULT_REG); + break; + + case AL_ETH_MAC_MODE_KR_LL_25G: + /* select 25G SERDES lane 0 and lane 1 */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.ext_serdes_ctrl, 0x00000003); + if (adapter->rev_id > AL_ETH_REV_ID_2) { + /* configure and enable the ASYNC FIFO between the MACs and the EC */ + /* TX min packet size */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_1, 0x00000037); + /* TX max packet size */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_2, 0x00002800); + /* TX input bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_3, 0x00000080); + /* TX output bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_4, 0x00030020); + /* TX Valid/ready configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_5, 0x00000023); + /* RX min packet size */ + /* al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_1, 0x00000040); */ + /* RX max packet size */ + /* al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_2, 0x00002800); */ + /* RX input bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_3, 0x00030020); + /* RX output bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_4, 0x00000080); + /* RX Valid/ready configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_5, 0x00000012); + /* V3 additional MAC selection */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_sel, 0x00000000); + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_10g_ll_cfg, 0x00000000); + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_10g_ll_ctrl, 0x00000000); + al_reg_write32(&adapter->mac_regs_base->gen_v3.pcs_10g_ll_cfg, 0x000000a0); + /* ASYNC FIFO ENABLE */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.afifo_ctrl, 0x00003333); + } + + /* MAC register file */ + al_reg_write32(&adapter->mac_regs_base->mac_10g.cmd_cfg, 0x01022810); + /* XAUI MAC control register */ + al_reg_write32(&adapter->mac_regs_base->gen.cfg, 0x00000005); + /* RXAUI MAC control register */ + al_reg_write32(&adapter->mac_regs_base->gen.rxaui_cfg, 0x00000007); + al_reg_write32(&adapter->mac_regs_base->gen.sd_cfg, 0x000001F1); + al_reg_write32(&adapter->mac_regs_base->gen.xgmii_dfifo_32_64, 0x00000401); +/* 
al_reg_write32(&adapter->mac_regs_base->gen.mac_res_1_out, 0x00000401); */ + al_reg_write32(&adapter->mac_regs_base->gen.xgmii_dfifo_64_32, 0x00000401); +/* al_reg_write32(&adapter->mac_regs_base->gen.mac_res_1_in, 0x00000401); */ + al_reg_write32_masked(&adapter->mac_regs_base->gen.mux_sel, ~ETH_MAC_GEN_MUX_SEL_KR_IN_MASK, 0x00073910); + al_reg_write32(&adapter->mac_regs_base->gen.clk_cfg, 0x10003210); + al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x000004f0); + al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x00000401); + + al_reg_write32_masked(&adapter->mac_regs_base->gen.led_cfg, + ETH_MAC_GEN_LED_CFG_SEL_MASK, + ETH_MAC_GEN_LED_CFG_SEL_DEFAULT_REG); + break; + + case AL_ETH_MAC_MODE_10G_SGMII: + /* MAC register file */ + al_reg_write32(&adapter->mac_regs_base->mac_10g.cmd_cfg, 0x01022810); + + /* XAUI MAC control register */ + al_reg_write32(&adapter->mac_regs_base->gen.cfg, 0x00000001); + + al_reg_write32(&adapter->mac_regs_base->mac_10g.if_mode, 0x0000002b); + al_reg_write32(&adapter->mac_regs_base->mac_10g.control, 0x00009140); + // FAST AN -- Testing only +#ifdef AL_HAL_ETH_FAST_AN + al_reg_write32(&adapter->mac_regs_base->mac_10g.link_timer_lo, 0x00000040); + al_reg_write32(&adapter->mac_regs_base->mac_10g.link_timer_hi, 0x00000000); +#endif + + /* RXAUI MAC control register */ + al_reg_write32(&adapter->mac_regs_base->gen.rxaui_cfg, 0x00000007); + al_reg_write32(&adapter->mac_regs_base->gen.xgmii_dfifo_32_64, 0x00000401); +/* al_reg_write32(&adapter->mac_regs_base->gen.mac_res_1_out, 0x00000401); */ + al_reg_write32(&adapter->mac_regs_base->gen.xgmii_dfifo_64_32, 0x00000401); +/* al_reg_write32(&adapter->mac_regs_base->gen.mac_res_1_in, 0x00000401); */ + al_reg_write32_masked(&adapter->mac_regs_base->gen.mux_sel, ~ETH_MAC_GEN_MUX_SEL_KR_IN_MASK, 0x00063910); + al_reg_write32(&adapter->mac_regs_base->gen.clk_cfg, 0x40003210); + al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x00000401); + + al_reg_write32_masked(&adapter->mac_regs_base->gen.led_cfg, + ETH_MAC_GEN_LED_CFG_SEL_MASK, + ETH_MAC_GEN_LED_CFG_SEL_DEFAULT_REG); + break; + + case AL_ETH_MAC_MODE_XLG_LL_40G: + /* configure and enable the ASYNC FIFO between the MACs and the EC */ + /* TX min packet size */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_1, 0x00000037); + /* TX max packet size */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_2, 0x00002800); + /* TX input bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_3, 0x00000080); + /* TX output bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_4, 0x00010040); + /* TX Valid/ready configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_5, 0x00000023); + /* RX min packet size */ + /* al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_1, 0x00000040); */ + /* RX max packet size */ + /* al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_2, 0x00002800); */ + /* RX input bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_3, 0x00010040); + /* RX output bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_4, 0x00000080); + /* RX Valid/ready configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_5, 0x00000112); + /* V3 additional MAC selection */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_sel, 0x00000010); + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_10g_ll_cfg, 0x00000000); + 
al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_10g_ll_ctrl, 0x00000000); + al_reg_write32(&adapter->mac_regs_base->gen_v3.pcs_10g_ll_cfg, 0x00000000); + /* ASYNC FIFO ENABLE */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.afifo_ctrl, 0x00003333); + + /* cmd_cfg */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_40g_ll_addr, 0x00000008); + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_40g_ll_data, 0x01022810); + /* speed_ability //Read-Only */ + /* al_reg_write32(&adapter->mac_regs_base->gen_v3.pcs_40g_ll_addr, 0x00000008); */ + /* 40G capable */ + /* al_reg_write32(&adapter->mac_regs_base->gen_v3.pcs_40g_ll_data, 0x00000002); */ + +#ifdef AL_HAL_ETH_FAST_AN + al_eth_40g_pcs_reg_write(adapter, 0x00010004, 1023); + al_eth_40g_pcs_reg_write(adapter, 0x00000000, 0xA04c); + al_eth_40g_pcs_reg_write(adapter, 0x00000000, 0x204c); +#endif + + /* XAUI MAC control register */ + al_reg_write32_masked(&adapter->mac_regs_base->gen.mux_sel, ~ETH_MAC_GEN_MUX_SEL_KR_IN_MASK, 0x06883910); + al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x0000040f); + + /* MAC register file */ +/* al_reg_write32(&adapter->mac_regs_base->mac_10g.cmd_cfg, 0x01022810); */ + /* XAUI MAC control register */ + al_reg_write32(&adapter->mac_regs_base->gen.cfg, 0x00000005); + /* RXAUI MAC control register */ + al_reg_write32(&adapter->mac_regs_base->gen.rxaui_cfg, 0x00000007); + al_reg_write32(&adapter->mac_regs_base->gen.sd_cfg, 0x000001F1); + al_reg_write32(&adapter->mac_regs_base->gen.xgmii_dfifo_32_64, 0x00000401); +/* al_reg_write32(&adapter->mac_regs_base->gen.mac_res_1_out, 0x00000401); */ + al_reg_write32(&adapter->mac_regs_base->gen.xgmii_dfifo_64_32, 0x00000401); +/* al_reg_write32(&adapter->mac_regs_base->gen.mac_res_1_in, 0x00000401); */ +/* al_reg_write32_masked(&adapter->mac_regs_base->gen.mux_sel, ~ETH_MAC_GEN_MUX_SEL_KR_IN_MASK, 0x00073910); *//* XLG_LL_40G change */ + al_reg_write32(&adapter->mac_regs_base->gen.clk_cfg, 0x10003210); +/* al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x000004f0); *//* XLG_LL_40G change */ +/* al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x00000401); *//* XLG_LL_40G change */ + + al_reg_write32_masked(&adapter->mac_regs_base->gen.led_cfg, + ETH_MAC_GEN_LED_CFG_SEL_MASK, + ETH_MAC_GEN_LED_CFG_SEL_DEFAULT_REG); + break; + + case AL_ETH_MAC_MODE_XLG_LL_50G: + + /* configure and enable the ASYNC FIFO between the MACs and the EC */ + /* TX min packet size */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_1, 0x00000037); + /* TX max packet size */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_2, 0x00002800); + /* TX input bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_3, 0x00000080); + /* TX output bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_4, 0x00010040); + /* TX Valid/ready configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.tx_afifo_cfg_5, 0x00000023); + /* RX min packet size */ + /* al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_1, 0x00000040); */ + /* RX max packet size */ + /* al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_2, 0x00002800); */ + /* RX input bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_3, 0x00010040); + /* RX output bus configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_4, 0x00000080); + /* RX Valid/ready configuration */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.rx_afifo_cfg_5, 0x00000112); + /* V3 
additional MAC selection */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_sel, 0x00000010); + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_10g_ll_cfg, 0x00000000); + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_10g_ll_ctrl, 0x00000000); + al_reg_write32(&adapter->mac_regs_base->gen_v3.pcs_10g_ll_cfg, 0x00000000); + /* ASYNC FIFO ENABLE */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.afifo_ctrl, 0x00003333); + + /* cmd_cfg */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_40g_ll_addr, 0x00000008); + al_reg_write32(&adapter->mac_regs_base->gen_v3.mac_40g_ll_data, 0x01022810); + /* speed_ability //Read-Only */ + /* al_reg_write32(&adapter->mac_regs_base->gen_v3.pcs_40g_ll_addr, 0x00000008); */ + /* 40G capable */ + /* al_reg_write32(&adapter->mac_regs_base->gen_v3.pcs_40g_ll_data, 0x00000002); */ + + /* select the 25G serdes for lanes 0/1 */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.ext_serdes_ctrl, 0x0000000F); + /* configure the PCS to work with 2 lanes */ + al_eth_40g_pcs_reg_write(adapter, 0x00010008, 0x0d81); + /* configure the PCS to work 32 bit interface */ + al_reg_write32(&adapter->mac_regs_base->gen_v3.pcs_40g_ll_cfg, 0x00440000); + + +#ifdef AL_HAL_ETH_FAST_AN + al_eth_40g_pcs_reg_write(adapter, 0x00010004, 1023); + al_eth_40g_pcs_reg_write(adapter, 0x00000000, 0xA04c); + al_eth_40g_pcs_reg_write(adapter, 0x00000000, 0x204c); +#endif + + /* XAUI MAC control register */ + al_reg_write32_masked(&adapter->mac_regs_base->gen.mux_sel, ~ETH_MAC_GEN_MUX_SEL_KR_IN_MASK, 0x06883910); + al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x0000040f); + + /* MAC register file */ +/* al_reg_write32(&adapter->mac_regs_base->mac_10g.cmd_cfg, 0x01022810); */ + /* XAUI MAC control register */ + al_reg_write32(&adapter->mac_regs_base->gen.cfg, 0x00000005); + /* RXAUI MAC control register */ + al_reg_write32(&adapter->mac_regs_base->gen.rxaui_cfg, 0x00000007); + al_reg_write32(&adapter->mac_regs_base->gen.sd_cfg, 0x000001F1); + al_reg_write32(&adapter->mac_regs_base->gen.xgmii_dfifo_32_64, 0x00000401); +/* al_reg_write32(&adapter->mac_regs_base->gen.mac_res_1_out, 0x00000401); */ + al_reg_write32(&adapter->mac_regs_base->gen.xgmii_dfifo_64_32, 0x00000401); +/* al_reg_write32(&adapter->mac_regs_base->gen.mac_res_1_in, 0x00000401); */ +/* al_reg_write32_masked(&adapter->mac_regs_base->gen.mux_sel, ~ETH_MAC_GEN_MUX_SEL_KR_IN_MASK, 0x00073910); *//* XLG_LL_40G change */ + al_reg_write32(&adapter->mac_regs_base->gen.clk_cfg, 0x10003210); +/* al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x000004f0); *//* XLG_LL_40G change */ +/* al_reg_write32(&adapter->mac_regs_base->gen.sd_fifo_ctrl, 0x00000401); *//* XLG_LL_40G change */ + + al_reg_write32_masked(&adapter->mac_regs_base->gen.led_cfg, + ETH_MAC_GEN_LED_CFG_SEL_MASK, + ETH_MAC_GEN_LED_CFG_SEL_DEFAULT_REG); + break; + + + default: + al_err("Eth: unsupported MAC mode %d", mode); + return -EPERM; + } + adapter->mac_mode = mode; + al_info("configured MAC to %s mode:\n", al_eth_mac_mode_str(mode)); + + return 0; +} + +/* start the mac */ +int al_eth_mac_start(struct al_hal_eth_adapter *adapter) +{ + if (AL_ETH_IS_1G_MAC(adapter->mac_mode)) { + /* 1G MAC control register */ + al_reg_write32_masked(&adapter->mac_regs_base->mac_1g.cmd_cfg, 0x3, 0x3); + } else if (AL_ETH_IS_10G_MAC(adapter->mac_mode) || AL_ETH_IS_25G_MAC(adapter->mac_mode)) { + /* 10G MAC control register */ + al_reg_write32_masked(&adapter->mac_regs_base->mac_10g.cmd_cfg, 0x3, 0x3); + } else { + uint32_t cmd_cfg; + + cmd_cfg = 
al_eth_40g_mac_reg_read(adapter, ETH_MAC_GEN_V3_MAC_40G_COMMAND_CONFIG_ADDR); + + cmd_cfg |= (ETH_MAC_GEN_V3_MAC_40G_COMMAND_CONFIG_TX_ENA | + ETH_MAC_GEN_V3_MAC_40G_COMMAND_CONFIG_RX_ENA); + + al_eth_40g_mac_reg_write(adapter, ETH_MAC_GEN_V3_MAC_40G_COMMAND_CONFIG_ADDR, cmd_cfg); + } + + return 0; +} + +/* stop the mac */ +int al_eth_mac_stop(struct al_hal_eth_adapter *adapter) +{ + if (AL_ETH_IS_1G_MAC(adapter->mac_mode)) + /* 1G MAC control register */ + al_reg_write32(&adapter->mac_regs_base->mac_1g.cmd_cfg, 0x0); + else if (AL_ETH_IS_10G_MAC(adapter->mac_mode) || AL_ETH_IS_25G_MAC(adapter->mac_mode)) + /* 10G MAC control register */ + al_reg_write32(&adapter->mac_regs_base->mac_10g.cmd_cfg, 0x0); + else + al_eth_40g_mac_reg_write(adapter, ETH_MAC_GEN_V3_MAC_40G_COMMAND_CONFIG_ADDR, 0); + + return 0; +} + +int al_eth_capabilities_get(struct al_hal_eth_adapter *adapter, struct al_eth_capabilities *caps) +{ + al_assert(caps); + + caps->speed_10_HD = AL_FALSE; + caps->speed_10_FD = AL_FALSE; + caps->speed_100_HD = AL_FALSE; + caps->speed_100_FD = AL_FALSE; + caps->speed_1000_HD = AL_FALSE; + caps->speed_1000_FD = AL_FALSE; + caps->speed_10000_HD = AL_FALSE; + caps->speed_10000_FD = AL_FALSE; + caps->pfc = AL_FALSE; + caps->eee = AL_FALSE; + + switch (adapter->mac_mode) { + case AL_ETH_MAC_MODE_RGMII: + case AL_ETH_MAC_MODE_SGMII: + caps->speed_10_HD = AL_TRUE; + caps->speed_10_FD = AL_TRUE; + caps->speed_100_HD = AL_TRUE; + caps->speed_100_FD = AL_TRUE; + caps->speed_1000_FD = AL_TRUE; + caps->eee = AL_TRUE; + break; + case AL_ETH_MAC_MODE_10GbE_Serial: + caps->speed_10000_FD = AL_TRUE; + caps->pfc = AL_TRUE; + break; + default: + al_err("Eth: unsupported MAC mode %d", adapter->mac_mode); + return -EPERM; + } + return 0; +} + +/* update link speed and duplex mode */ +int al_eth_mac_link_config(struct al_hal_eth_adapter *adapter, + al_bool force_1000_base_x, + al_bool an_enable, + uint32_t speed, + al_bool full_duplex) +{ + uint32_t mac_ctrl; + uint32_t sgmii_ctrl = 0; + uint32_t sgmii_if_mode = 0; + uint32_t rgmii_ctrl = 0; + + if (!AL_ETH_IS_1G_MAC(adapter->mac_mode)) { + al_err("eth [%s]: this function not supported in this mac mode.\n", + adapter->name); + return -EINVAL; + } + + if ((adapter->mac_mode != AL_ETH_MAC_MODE_RGMII) && (an_enable)) { + /* + * an_enable is not relevant to RGMII mode. + * in AN mode speed and duplex aren't relevant. + */ + al_info("eth [%s]: set auto negotiation to enable\n", adapter->name); + } else { + al_info("eth [%s]: set link speed to %dMbps. %s duplex.\n", adapter->name, + speed, full_duplex == AL_TRUE ? "full" : "half"); + + if ((speed != 10) && (speed != 100) && (speed != 1000)) { + al_err("eth [%s]: bad speed parameter (%d).\n", + adapter->name, speed); + return -EINVAL; + } + if ((speed == 1000) && (full_duplex == AL_FALSE)) { + al_err("eth [%s]: half duplex in 1Gbps is not supported.\n", + adapter->name); + return -EINVAL; + } + } + + mac_ctrl = al_reg_read32(&adapter->mac_regs_base->mac_1g.cmd_cfg); + + if (adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) { + al_reg_write32(&adapter->mac_regs_base->sgmii.reg_addr, + ETH_MAC_SGMII_REG_ADDR_CTRL_REG); + sgmii_ctrl = al_reg_read32(&adapter->mac_regs_base->sgmii.reg_data); + /* + * in case bit 0 is off in sgmii_if_mode register all the other + * bits are ignored. 
+ */ + if (force_1000_base_x == AL_FALSE) + sgmii_if_mode = ETH_MAC_SGMII_REG_DATA_IF_MODE_SGMII_EN; + + if (an_enable == AL_TRUE) { + sgmii_if_mode |= ETH_MAC_SGMII_REG_DATA_IF_MODE_SGMII_AN; + sgmii_ctrl |= ETH_MAC_SGMII_REG_DATA_CTRL_AN_ENABLE; + } else { + sgmii_ctrl &= ~(ETH_MAC_SGMII_REG_DATA_CTRL_AN_ENABLE); + } + } + + if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) { + /* + * Use the speed provided by the MAC instead of the PHY + */ + rgmii_ctrl = al_reg_read32(&adapter->mac_regs_base->gen.rgmii_cfg); + + AL_REG_MASK_CLEAR(rgmii_ctrl, ETH_MAC_GEN_RGMII_CFG_ENA_AUTO); + AL_REG_MASK_CLEAR(rgmii_ctrl, ETH_MAC_GEN_RGMII_CFG_SET_1000_SEL); + AL_REG_MASK_CLEAR(rgmii_ctrl, ETH_MAC_GEN_RGMII_CFG_SET_10_SEL); + + al_reg_write32(&adapter->mac_regs_base->gen.rgmii_cfg, rgmii_ctrl); + } + + if (full_duplex == AL_TRUE) { + AL_REG_MASK_CLEAR(mac_ctrl, AL_ETH_1G_MAC_CTRL_HD_EN); + } else { + AL_REG_MASK_SET(mac_ctrl, AL_ETH_1G_MAC_CTRL_HD_EN); + sgmii_if_mode |= ETH_MAC_SGMII_REG_DATA_IF_MODE_SGMII_DUPLEX; + } + + if (speed == 1000) { + AL_REG_MASK_SET(mac_ctrl, AL_ETH_1G_MAC_CTRL_1G_SPD); + sgmii_if_mode |= ETH_MAC_SGMII_REG_DATA_IF_MODE_SGMII_SPEED_1000; + } else { + AL_REG_MASK_CLEAR(mac_ctrl, AL_ETH_1G_MAC_CTRL_1G_SPD); + if (speed == 10) { + AL_REG_MASK_SET(mac_ctrl, AL_ETH_1G_MAC_CTRL_10M_SPD); + } else { + sgmii_if_mode |= ETH_MAC_SGMII_REG_DATA_IF_MODE_SGMII_SPEED_100; + AL_REG_MASK_CLEAR(mac_ctrl, AL_ETH_1G_MAC_CTRL_10M_SPD); + } + } + + if (adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) { + al_reg_write32(&adapter->mac_regs_base->sgmii.reg_addr, + ETH_MAC_SGMII_REG_ADDR_IF_MODE_REG); + al_reg_write32(&adapter->mac_regs_base->sgmii.reg_data, + sgmii_if_mode); + + al_reg_write32(&adapter->mac_regs_base->sgmii.reg_addr, + ETH_MAC_SGMII_REG_ADDR_CTRL_REG); + al_reg_write32(&adapter->mac_regs_base->sgmii.reg_data, + sgmii_ctrl); + } + + al_reg_write32(&adapter->mac_regs_base->mac_1g.cmd_cfg, mac_ctrl); + + return 0; +} + +int al_eth_mac_loopback_config(struct al_hal_eth_adapter *adapter, int enable) +{ + const char *state = (enable) ? 
"enable" : "disable"; + + al_dbg("eth [%s]: loopback %s\n", adapter->name, state); + if (AL_ETH_IS_1G_MAC(adapter->mac_mode)) { + uint32_t reg; + reg = al_reg_read32(&adapter->mac_regs_base->mac_1g.cmd_cfg); + if (enable) + reg |= AL_BIT(15); + else + reg &= ~AL_BIT(15); + al_reg_write32(&adapter->mac_regs_base->mac_1g.cmd_cfg, reg); + } else if ((AL_ETH_IS_10G_MAC(adapter->mac_mode) || AL_ETH_IS_10G_MAC(adapter->mac_mode)) && (adapter->rev_id == AL_ETH_REV_ID_3)) { + uint32_t reg; + al_reg_write16( + (uint16_t *)&adapter->mac_regs_base->kr.pcs_addr, ETH_MAC_KR_PCS_CONTROL_1_ADDR); + reg = al_reg_read16( + (uint16_t *)&adapter->mac_regs_base->kr.pcs_data); + if (enable) + reg |= AL_BIT(14); + else + reg &= ~AL_BIT(14); + al_reg_write16( + (uint16_t *)&adapter->mac_regs_base->kr.pcs_addr, ETH_MAC_KR_PCS_CONTROL_1_ADDR); + al_reg_write16( + (uint16_t *)&adapter->mac_regs_base->kr.pcs_data, reg); + } else if (adapter->mac_mode == AL_ETH_MAC_MODE_XLG_LL_40G) { + uint32_t reg; + reg = al_eth_40g_pcs_reg_read(adapter, ETH_MAC_GEN_V3_PCS_40G_CONTROL_STATUS_ADDR); + if (enable) + reg |= AL_BIT(14); + else + reg &= ~AL_BIT(14); + al_eth_40g_pcs_reg_write(adapter, ETH_MAC_GEN_V3_PCS_40G_CONTROL_STATUS_ADDR, reg); + } else { + al_err("Eth: mac loopback not supported in this mode %d", adapter->mac_mode); + return -EPERM; + } + return 0; +} + +/* MDIO */ +int al_eth_mdio_config( + struct al_hal_eth_adapter *adapter, + enum al_eth_mdio_type mdio_type, + al_bool shared_mdio_if, + enum al_eth_ref_clk_freq ref_clk_freq, + unsigned int mdio_clk_freq_khz) +{ + enum al_eth_mdio_if mdio_if = AL_ETH_MDIO_IF_10G_MAC; + const char *if_name = (mdio_if == AL_ETH_MDIO_IF_1G_MAC) ? "10/100/1G MAC" : "10G MAC"; + const char *type_name = (mdio_type == AL_ETH_MDIO_TYPE_CLAUSE_22) ? "Clause 22" : "Clause 45"; + const char *shared_name = (shared_mdio_if == AL_TRUE) ? "Yes" : "No"; + + unsigned int ref_clk_freq_khz; + uint32_t val; + + al_dbg("eth [%s]: mdio config: interface %s. type %s. 
shared: %s\n", adapter->name, if_name, type_name, shared_name); + adapter->shared_mdio_if = shared_mdio_if; + + val = al_reg_read32(&adapter->mac_regs_base->gen.cfg); + al_dbg("eth [%s]: mdio config: 10G mac \n", adapter->name); + + switch(mdio_if) + { + case AL_ETH_MDIO_IF_1G_MAC: + val &= ~AL_BIT(10); + break; + case AL_ETH_MDIO_IF_10G_MAC: + val |= AL_BIT(10); + break; + } + al_reg_write32(&adapter->mac_regs_base->gen.cfg, val); + adapter->mdio_if = mdio_if; + + + if (mdio_if == AL_ETH_MDIO_IF_10G_MAC) + { + val = al_reg_read32(&adapter->mac_regs_base->mac_10g.mdio_cfg_status); + switch(mdio_type) + { + case AL_ETH_MDIO_TYPE_CLAUSE_22: + val &= ~AL_BIT(6); + break; + case AL_ETH_MDIO_TYPE_CLAUSE_45: + val |= AL_BIT(6); + break; + } + + /* set clock div to get 'mdio_clk_freq_khz' */ + switch (ref_clk_freq) { + case AL_ETH_REF_FREQ_375_MHZ: + ref_clk_freq_khz = 375000; + break; + case AL_ETH_REF_FREQ_187_5_MHZ: + ref_clk_freq_khz = 187500; + break; + case AL_ETH_REF_FREQ_250_MHZ: + ref_clk_freq_khz = 250000; + break; + case AL_ETH_REF_FREQ_500_MHZ: + ref_clk_freq_khz = 500000; + break; + case AL_ETH_REF_FREQ_428_MHZ: + ref_clk_freq_khz = 428000; + break; + default: + al_err("eth [%s]: %s: invalid reference clock frequency" + " (%d)\n", + adapter->name, __func__, ref_clk_freq); + }; + + val &= ~(0x1FF << 7); + val |= (ref_clk_freq_khz / (2 * mdio_clk_freq_khz)) << 7; + AL_REG_FIELD_SET(val, ETH_10G_MAC_MDIO_CFG_HOLD_TIME_MASK, + ETH_10G_MAC_MDIO_CFG_HOLD_TIME_SHIFT, + ETH_10G_MAC_MDIO_CFG_HOLD_TIME_7_CLK); + al_reg_write32(&adapter->mac_regs_base->mac_10g.mdio_cfg_status, val); + }else{ + if(mdio_type != AL_ETH_MDIO_TYPE_CLAUSE_22) { + al_err("eth [%s] mdio type not supported for this interface\n", + adapter->name); + return -EINVAL; + } + } + adapter->mdio_type = mdio_type; + + return 0; +} + +int al_eth_mdio_1g_mac_read(struct al_hal_eth_adapter *adapter, + uint32_t phy_addr __attribute__((__unused__)), + uint32_t reg, uint16_t *val) +{ + *val = al_reg_read32( + &adapter->mac_regs_base->mac_1g.phy_regs_base + reg); + return 0; +} + +int al_eth_mdio_1g_mac_write(struct al_hal_eth_adapter *adapter, + uint32_t phy_addr __attribute__((__unused__)), + uint32_t reg, uint16_t val) +{ + al_reg_write32( + &adapter->mac_regs_base->mac_1g.phy_regs_base + reg, val); + return 0; +} + +int al_eth_mdio_10g_mac_wait_busy(struct al_hal_eth_adapter *adapter) +{ + int count = 0; + uint32_t mdio_cfg_status; + + do { + mdio_cfg_status = al_reg_read32(&adapter->mac_regs_base->mac_10g.mdio_cfg_status); +/* + if (mdio_cfg_status & AL_BIT(1)){ //error + al_err(" %s mdio read failed on error. phy_addr 0x%x reg 0x%x\n", + udma_params.name, phy_addr, reg); + return -EIO; + }*/ + if (mdio_cfg_status & AL_BIT(0)){ + if (count > 0) + al_dbg("eth [%s] mdio: still busy!\n", adapter->name); + }else{ + return 0; + } + al_udelay(AL_ETH_MDIO_DELAY_PERIOD); + }while(count++ < AL_ETH_MDIO_DELAY_COUNT); + + return -ETIMEDOUT; +} + +int al_eth_mdio_10g_mac_type22(struct al_hal_eth_adapter *adapter, int read, uint32_t phy_addr, uint32_t reg, uint16_t *val) +{ + int rc; + const char *op = (read == 1) ? "read":"write"; + uint32_t mdio_cfg_status; + uint16_t mdio_cmd; + + //wait if the HW is busy + rc = al_eth_mdio_10g_mac_wait_busy(adapter); + if (rc) { + al_err(" eth [%s] mdio %s failed. 
HW is busy\n", adapter->name, op); + return rc; + } + + mdio_cmd = (uint16_t)(0x1F & reg); + mdio_cmd |= (0x1F & phy_addr) << 5; + + if (read) + mdio_cmd |= AL_BIT(15); //READ command + + al_reg_write16(&adapter->mac_regs_base->mac_10g.mdio_cmd, + mdio_cmd); + if (!read) + al_reg_write16(&adapter->mac_regs_base->mac_10g.mdio_data, + *val); + + //wait for the busy to clear + rc = al_eth_mdio_10g_mac_wait_busy(adapter); + if (rc != 0) { + al_err(" %s mdio %s failed on timeout\n", adapter->name, op); + return -ETIMEDOUT; + } + + mdio_cfg_status = al_reg_read32(&adapter->mac_regs_base->mac_10g.mdio_cfg_status); + + if (mdio_cfg_status & AL_BIT(1)){ //error +// al_err(" %s mdio %s failed on error. phy_addr 0x%x reg 0x%x\n", +// adapter->name, op, phy_addr, reg); + return -EIO; + } + if (read) + *val = al_reg_read16( + (uint16_t *)&adapter->mac_regs_base->mac_10g.mdio_data); + return 0; +} + +int al_eth_mdio_10g_mac_type45(struct al_hal_eth_adapter *adapter, int read, uint32_t port_addr, uint32_t device, uint32_t reg, uint16_t *val) +{ + int rc; + const char *op = (read == 1) ? "read":"write"; + uint32_t mdio_cfg_status; + uint16_t mdio_cmd; + + //wait if the HW is busy + rc = al_eth_mdio_10g_mac_wait_busy(adapter); + if (rc) { + al_err(" %s mdio %s failed. HW is busy\n", adapter->name, op); + return rc; + } + // set command register + mdio_cmd = (uint16_t)(0x1F & device); + mdio_cmd |= (0x1F & port_addr) << 5; + al_reg_write16(&adapter->mac_regs_base->mac_10g.mdio_cmd, + mdio_cmd); + + // send address frame + al_reg_write16(&adapter->mac_regs_base->mac_10g.mdio_regaddr, reg); + //wait for the busy to clear + rc = al_eth_mdio_10g_mac_wait_busy(adapter); + if (rc) { + al_err(" %s mdio %s (address frame) failed on timeout\n", adapter->name, op); + return rc; + } + + // if read, write again to the command register with READ bit set + if (read) { + mdio_cmd |= AL_BIT(15); //READ command + al_reg_write16( + (uint16_t *)&adapter->mac_regs_base->mac_10g.mdio_cmd, + mdio_cmd); + } else { + al_reg_write16( + (uint16_t *)&adapter->mac_regs_base->mac_10g.mdio_data, + *val); + } + //wait for the busy to clear + rc = al_eth_mdio_10g_mac_wait_busy(adapter); + if (rc) { + al_err(" %s mdio %s failed on timeout\n", adapter->name, op); + return rc; + } + + mdio_cfg_status = al_reg_read32(&adapter->mac_regs_base->mac_10g.mdio_cfg_status); + + if (mdio_cfg_status & AL_BIT(1)){ //error +// al_err(" %s mdio %s failed on error. port 0x%x, device 0x%x reg 0x%x\n", +// adapter->name, op, port_addr, device, reg); + return -EIO; + } + if (read) + *val = al_reg_read16( + (uint16_t *)&adapter->mac_regs_base->mac_10g.mdio_data); + return 0; +} + +/** + * acquire mdio interface ownership + * when mdio interface shared between multiple eth controllers, this function waits until the ownership granted for this controller. + * this function does nothing when the mdio interface is used only by this controller. + * + * @param adapter + * @return 0 on success, -ETIMEDOUT on timeout. + */ +static int al_eth_mdio_lock(struct al_hal_eth_adapter *adapter) +{ + int count = 0; + uint32_t mdio_ctrl_1; + + if (adapter->shared_mdio_if == AL_FALSE) + return 0; /* nothing to do when interface is not shared */ + + do { + mdio_ctrl_1 = al_reg_read32(&adapter->mac_regs_base->gen.mdio_ctrl_1); +/* + if (mdio_cfg_status & AL_BIT(1)){ //error + al_err(" %s mdio read failed on error. 
phy_addr 0x%x reg 0x%x\n", + udma_params.name, phy_addr, reg); + return -EIO; + }*/ + if (mdio_ctrl_1 & AL_BIT(0)){ + if (count > 0) + al_dbg("eth %s mdio interface still busy!\n", adapter->name); + }else{ + return 0; + } + al_udelay(AL_ETH_MDIO_DELAY_PERIOD); + }while(count++ < (AL_ETH_MDIO_DELAY_COUNT * 4)); + al_err(" %s mdio failed to take ownership. MDIO info reg: 0x%08x\n", + adapter->name, al_reg_read32(&adapter->mac_regs_base->gen.mdio_1)); + + return -ETIMEDOUT; +} + +/** + * free mdio interface ownership + * when mdio interface shared between multiple eth controllers, this function releases the ownership granted for this controller. + * this function does nothing when the mdio interface is used only by this controller. + * + * @param adapter + * @return 0. + */ +static int al_eth_mdio_free(struct al_hal_eth_adapter *adapter) +{ + if (adapter->shared_mdio_if == AL_FALSE) + return 0; /* nothing to do when interface is not shared */ + + al_reg_write32(&adapter->mac_regs_base->gen.mdio_ctrl_1, 0); + + /* + * Addressing RMN: 2917 + * + * RMN description: + * The HW spin-lock is stateless and doesn't maintain any scheduling + * policy. + * + * Software flow: + * After getting the lock wait 2 times the delay period in order to give + * the other port chance to take the lock and prevent starvation. + * This is not scalable to more than two ports. + */ + al_udelay(2 * AL_ETH_MDIO_DELAY_PERIOD); + + return 0; +} + +int al_eth_mdio_read(struct al_hal_eth_adapter *adapter, uint32_t phy_addr, uint32_t device, uint32_t reg, uint16_t *val) +{ + int rc; + rc = al_eth_mdio_lock(adapter); + + /*"interface ownership taken"*/ + if (rc) + return rc; + + if (adapter->mdio_if == AL_ETH_MDIO_IF_1G_MAC) + rc = al_eth_mdio_1g_mac_read(adapter, phy_addr, reg, val); + else + if (adapter->mdio_type == AL_ETH_MDIO_TYPE_CLAUSE_22) + rc = al_eth_mdio_10g_mac_type22(adapter, 1, phy_addr, reg, val); + else + rc = al_eth_mdio_10g_mac_type45(adapter, 1, phy_addr, device, reg, val); + + al_eth_mdio_free(adapter); + al_dbg("eth mdio read: phy_addr %x, device %x, reg %x val %x\n", phy_addr, device, reg, *val); + return rc; +} + +int al_eth_mdio_write(struct al_hal_eth_adapter *adapter, uint32_t phy_addr, uint32_t device, uint32_t reg, uint16_t val) +{ + int rc; + al_dbg("eth mdio write: phy_addr %x, device %x, reg %x, val %x\n", phy_addr, device, reg, val); + rc = al_eth_mdio_lock(adapter); + /* interface ownership taken */ + if (rc) + return rc; + + if (adapter->mdio_if == AL_ETH_MDIO_IF_1G_MAC) + rc = al_eth_mdio_1g_mac_write(adapter, phy_addr, reg, val); + else + if (adapter->mdio_type == AL_ETH_MDIO_TYPE_CLAUSE_22) + rc = al_eth_mdio_10g_mac_type22(adapter, 0, phy_addr, reg, &val); + else + rc = al_eth_mdio_10g_mac_type45(adapter, 0, phy_addr, device, reg, &val); + + al_eth_mdio_free(adapter); + return rc; +} + +void al_dump_tx_desc(union al_udma_desc *tx_desc) +{ + uint32_t *ptr = (uint32_t *)tx_desc; + al_dbg("eth tx desc:\n"); + al_dbg("0x%08x\n", *(ptr++)); + al_dbg("0x%08x\n", *(ptr++)); + al_dbg("0x%08x\n", *(ptr++)); + al_dbg("0x%08x\n", *(ptr++)); +} + +static void +al_dump_tx_pkt(struct al_udma_q *tx_dma_q, struct al_eth_pkt *pkt) +{ + const char *tso = (pkt->flags & AL_ETH_TX_FLAGS_TSO) ? "TSO" : ""; + const char *l3_csum = (pkt->flags & AL_ETH_TX_FLAGS_IPV4_L3_CSUM) ? "L3 CSUM" : ""; + const char *l4_csum = (pkt->flags & AL_ETH_TX_FLAGS_L4_CSUM) ? + ((pkt->flags & AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM) ? "L4 PARTIAL CSUM" : "L4 FULL CSUM") : ""; + const char *fcs = (pkt->flags & AL_ETH_TX_FLAGS_L2_DIS_FCS) ? 
"Disable FCS" : ""; + const char *ptp = (pkt->flags & AL_ETH_TX_FLAGS_TS) ? "TX_PTP" : ""; + const char *l3_proto_name = "unknown"; + const char *l4_proto_name = "unknown"; + const char *outer_l3_proto_name = "N/A"; + const char *tunnel_mode = (pkt->tunnel_mode & AL_ETH_TUNNEL_NO_UDP) ? "TUNNEL" : ""; + uint32_t total_len = 0; + int i; + + al_dbg("[%s %d]: flags: %s %s %s %s %s %s\n", tx_dma_q->udma->name, tx_dma_q->qid, + tso, l3_csum, l4_csum, fcs, ptp, tunnel_mode); + + switch (pkt->l3_proto_idx) { + case AL_ETH_PROTO_ID_IPv4: + l3_proto_name = "IPv4"; + break; + case AL_ETH_PROTO_ID_IPv6: + l3_proto_name = "IPv6"; + break; + default: + l3_proto_name = "unknown"; + break; + } + + switch (pkt->l4_proto_idx) { + case AL_ETH_PROTO_ID_TCP: + l4_proto_name = "TCP"; + break; + case AL_ETH_PROTO_ID_UDP: + l4_proto_name = "UDP"; + break; + default: + l4_proto_name = "unknown"; + break; + } + + switch (pkt->outer_l3_proto_idx) { + case AL_ETH_PROTO_ID_IPv4: + outer_l3_proto_name = "IPv4"; + break; + case AL_ETH_PROTO_ID_IPv6: + outer_l3_proto_name = "IPv6"; + break; + default: + outer_l3_proto_name = "N/A"; + break; + } + + al_dbg("[%s %d]: L3 proto: %d (%s). L4 proto: %d (%s). Outer_L3 proto: %d (%s). vlan source count %d. mod add %d. mod del %d\n", + tx_dma_q->udma->name, tx_dma_q->qid, pkt->l3_proto_idx, + l3_proto_name, pkt->l4_proto_idx, l4_proto_name, + pkt->outer_l3_proto_idx, outer_l3_proto_name, + pkt->source_vlan_count, pkt->vlan_mod_add_count, + pkt->vlan_mod_del_count); + + if (pkt->meta) { + const char * store = pkt->meta->store ? "Yes" : "No"; + + al_dbg("[%s %d]: tx pkt with meta data. words valid %x\n", + tx_dma_q->udma->name, tx_dma_q->qid, + pkt->meta->words_valid); + if (tx_dma_q->rev_id == AL_ETH_REV_ID_0) + al_dbg("[%s %d]: meta: store to cache %s. l3 hdr len %d. " + "l3 hdr offset %d. l4 hdr len %d. mss sel %d\n", + tx_dma_q->udma->name, tx_dma_q->qid, store, + pkt->meta->l3_header_len, pkt->meta->l3_header_offset, + pkt->meta->l4_header_len, + pkt->meta->mss_idx_sel); + else { + const char *ptp_val = (pkt->flags & AL_ETH_TX_FLAGS_TS) ? "Yes" : "No"; + al_dbg("[%s %d]: meta: store to cache %s. l3 hdr len %d. " + "l3 hdr offset %d. l4 hdr len %d. mss val %d ts_index %d ts_val:%s \n", + tx_dma_q->udma->name, tx_dma_q->qid, store, + pkt->meta->l3_header_len, pkt->meta->l3_header_offset, + pkt->meta->l4_header_len,pkt->meta->mss_val, + pkt->meta->ts_index,ptp_val); + al_dbg("outer_l3_hdr_offset %d. outer_l3_len %d. \n", + pkt->meta->outer_l3_len, pkt->meta->outer_l3_offset); + } + } + + al_dbg("[%s %d]: num of bufs: %d\n", tx_dma_q->udma->name, tx_dma_q->qid, + pkt->num_of_bufs); + for (i = 0; i < pkt->num_of_bufs; i++) { + al_dbg("eth [%s %d]: buf[%d]: len 0x%08x. 
address 0x%016llx\n", tx_dma_q->udma->name, tx_dma_q->qid, + i, pkt->bufs[i].len, (unsigned long long)pkt->bufs[i].addr); + total_len += pkt->bufs[i].len; + } + al_dbg("[%s %d]: total len: 0x%08x\n", tx_dma_q->udma->name, tx_dma_q->qid, total_len); + +} + +/* TX */ +/** + * add packet to transmission queue + */ +int al_eth_tx_pkt_prepare(struct al_udma_q *tx_dma_q, struct al_eth_pkt *pkt) +{ + union al_udma_desc *tx_desc; + uint32_t tx_descs; + uint32_t flags = AL_M2S_DESC_FIRST | AL_M2S_DESC_CONCAT; + uint64_t vmid = ((uint64_t)pkt->vmid) << AL_UDMA_DESC_VMID_SHIFT; + uint32_t meta_ctrl; + uint32_t ring_id; + int buf_idx; + + al_dbg("[%s %d]: new tx pkt\n", tx_dma_q->udma->name, tx_dma_q->qid); + + al_dump_tx_pkt(tx_dma_q, pkt); + + tx_descs = pkt->num_of_bufs; + if (pkt->meta) { + tx_descs += 1; + } + if (unlikely(al_udma_available_get(tx_dma_q) < tx_descs)) { + al_dbg("[%s %d]: failed to allocate (%d) descriptors", + tx_dma_q->udma->name, tx_dma_q->qid, tx_descs); + return 0; + } + if (pkt->meta) { + uint32_t meta_word_0 = pkt->flags & AL_ETH_TX_FLAGS_INT; + uint32_t meta_word_1 = 0; + uint32_t meta_word_2 = 0; + uint32_t meta_word_3 = 0; + + meta_word_0 |= AL_M2S_DESC_FIRST | AL_M2S_DESC_META_DATA; + flags &= ~(AL_M2S_DESC_FIRST | AL_ETH_TX_FLAGS_INT); + + tx_desc = al_udma_desc_get(tx_dma_q); + /* get ring id, and clear FIRST and Int flags */ + ring_id = al_udma_ring_id_get(tx_dma_q) << + AL_M2S_DESC_RING_ID_SHIFT; + + meta_word_0 |= ring_id; + meta_word_0 |= pkt->meta->words_valid << 12; + + if (pkt->meta->store) + meta_word_0 |= AL_ETH_TX_META_STORE; + + if (pkt->meta->words_valid & 1) { + meta_word_0 |= pkt->meta->vlan1_cfi_sel; + meta_word_0 |= pkt->meta->vlan2_vid_sel << 2; + meta_word_0 |= pkt->meta->vlan2_cfi_sel << 4; + meta_word_0 |= pkt->meta->vlan2_pbits_sel << 6; + meta_word_0 |= pkt->meta->vlan2_ether_sel << 8; + } + + if (pkt->meta->words_valid & 2) { + meta_word_1 = pkt->meta->vlan1_new_vid; + meta_word_1 |= pkt->meta->vlan1_new_cfi << 12; + meta_word_1 |= pkt->meta->vlan1_new_pbits << 13; + meta_word_1 |= pkt->meta->vlan2_new_vid << 16; + meta_word_1 |= pkt->meta->vlan2_new_cfi << 28; + meta_word_1 |= pkt->meta->vlan2_new_pbits << 29; + } + + if (pkt->meta->words_valid & 4) { + meta_word_2 = pkt->meta->l3_header_len & AL_ETH_TX_META_L3_LEN_MASK; + meta_word_2 |= (pkt->meta->l3_header_offset & AL_ETH_TX_META_L3_OFF_MASK) << + AL_ETH_TX_META_L3_OFF_SHIFT; + meta_word_2 |= (pkt->meta->l4_header_len & 0x3f) << 16; + + if (tx_dma_q->rev_id == AL_ETH_REV_ID_0) { + meta_word_2 |= (pkt->meta->mss_idx_sel & 7) << 24; + } else { + uint32_t l3_offset; + + if (unlikely(pkt->flags & AL_ETH_TX_FLAGS_TS)) + meta_word_0 |= pkt->meta->ts_index << AL_ETH_TX_META_MSS_MSB_TS_VAL_SHIFT; + else + meta_word_0 |= (((pkt->meta->mss_val & 0x3c00) >> 10) + << AL_ETH_TX_META_MSS_MSB_TS_VAL_SHIFT); + meta_word_2 |= ((pkt->meta->mss_val & 0x03ff) + << AL_ETH_TX_META_MSS_LSB_VAL_SHIFT); + + /* + * move from bytes to multiplication of 2 as the HW + * expect to get it + */ + l3_offset = (pkt->meta->outer_l3_offset >> 1); + + meta_word_0 |= + (((l3_offset & + AL_ETH_TX_META_OUTER_L3_OFF_HIGH_MASK) >> 3) + << AL_ETH_TX_META_OUTER_L3_OFF_HIGH_SHIFT); + + meta_word_3 |= + ((l3_offset & + AL_ETH_TX_META_OUTER_L3_OFF_LOW_MASK) + << AL_ETH_TX_META_OUTER_L3_OFF_LOW_SHIFT); + + /* + * shift right 2 bits to work in multiplication of 4 + * as the HW expect to get it + */ + meta_word_3 |= + (((pkt->meta->outer_l3_len >> 2) & + AL_ETH_TX_META_OUTER_L3_LEN_MASK) + << AL_ETH_TX_META_OUTER_L3_LEN_SHIFT); + } + } + 
+#ifdef AL_ETH_SUPPORT_MACSEC + if (pkt->meta->words_valid & 8) { + /* MacSec packets */ + if (pkt->flags & AL_ETH_TX_FLAGS_L2_MACSEC_PKT) { /* pkt is macsec protected */ + meta_word_3 |= pkt->macsec_sign << AL_ETH_TX_MACSEC_SIGN_SHIFT; + meta_word_3 |= pkt->macsec_encrypt << AL_ETH_TX_MACSEC_ENCRYPT_SHIFT; + meta_word_3 |= pkt->macsec_association_number << AL_ETH_TX_MACSEC_AN_LSB_SHIFT; + meta_word_3 |= pkt->macsec_secure_channel << AL_ETH_TX_MACSEC_SC_LSB_SHIFT; + meta_word_3 |= pkt->macsec_secured_pyld_len << AL_ETH_TX_MACSEC_SECURED_PYLD_LEN_LSB_SHIFT; + } + } +#endif + tx_desc->tx_meta.len_ctrl = swap32_to_le(meta_word_0); + tx_desc->tx_meta.meta_ctrl = swap32_to_le(meta_word_1); + tx_desc->tx_meta.meta1 = swap32_to_le(meta_word_2); + tx_desc->tx_meta.meta2 = swap32_to_le(meta_word_3); + al_dump_tx_desc(tx_desc); + } + +#ifdef AL_ETH_SUPPORT_DDP + if (pkt->ext_meta_data != NULL) + al_eth_ddp_ext_metadata_create(tx_dma_q, &flags, pkt->ext_meta_data); +#endif + + meta_ctrl = pkt->flags & AL_ETH_TX_PKT_META_FLAGS; + + /* L4_PARTIAL_CSUM without L4_CSUM is invalid option */ + al_assert((pkt->flags & (AL_ETH_TX_FLAGS_L4_CSUM|AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM)) != + AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM); + + /* TSO packets can't have Timestamp enabled */ + al_assert((pkt->flags & (AL_ETH_TX_FLAGS_TSO|AL_ETH_TX_FLAGS_TS)) != + (AL_ETH_TX_FLAGS_TSO|AL_ETH_TX_FLAGS_TS)); + + meta_ctrl |= pkt->l3_proto_idx; + meta_ctrl |= pkt->l4_proto_idx << AL_ETH_TX_L4_PROTO_IDX_SHIFT; + meta_ctrl |= pkt->source_vlan_count << AL_ETH_TX_SRC_VLAN_CNT_SHIFT; + meta_ctrl |= pkt->vlan_mod_add_count << AL_ETH_TX_VLAN_MOD_ADD_SHIFT; + meta_ctrl |= pkt->vlan_mod_del_count << AL_ETH_TX_VLAN_MOD_DEL_SHIFT; + meta_ctrl |= pkt->vlan_mod_v1_ether_sel << AL_ETH_TX_VLAN_MOD_E_SEL_SHIFT; + meta_ctrl |= pkt->vlan_mod_v1_vid_sel << AL_ETH_TX_VLAN_MOD_VID_SEL_SHIFT; + meta_ctrl |= pkt->vlan_mod_v1_pbits_sel << AL_ETH_TX_VLAN_MOD_PBIT_SEL_SHIFT; + + if (tx_dma_q->rev_id > AL_ETH_REV_ID_0) { + meta_ctrl |= pkt->tunnel_mode << AL_ETH_TX_TUNNEL_MODE_SHIFT; + if (pkt->outer_l3_proto_idx == AL_ETH_PROTO_ID_IPv4) + meta_ctrl |= 1 << AL_ETH_TX_OUTER_L3_PROTO_SHIFT; + } + + flags |= pkt->flags & AL_ETH_TX_PKT_UDMA_FLAGS; + for(buf_idx = 0; buf_idx < pkt->num_of_bufs; buf_idx++ ) { + uint32_t flags_len = flags; + + tx_desc = al_udma_desc_get(tx_dma_q); + /* get ring id, and clear FIRST and Int flags */ + ring_id = al_udma_ring_id_get(tx_dma_q) << + AL_M2S_DESC_RING_ID_SHIFT; + + flags_len |= ring_id; + + if (buf_idx == (pkt->num_of_bufs - 1)) + flags_len |= AL_M2S_DESC_LAST; + + /* clear First and Int flags */ + flags &= AL_ETH_TX_FLAGS_NO_SNOOP; + flags |= AL_M2S_DESC_CONCAT; + + flags_len |= pkt->bufs[buf_idx].len & AL_M2S_DESC_LEN_MASK; + tx_desc->tx.len_ctrl = swap32_to_le(flags_len); + if (buf_idx == 0) + tx_desc->tx.meta_ctrl = swap32_to_le(meta_ctrl); + tx_desc->tx.buf_ptr = swap64_to_le( + pkt->bufs[buf_idx].addr | vmid); + al_dump_tx_desc(tx_desc); + } + + al_dbg("[%s %d]: pkt descriptors written into the tx queue. 
descs num (%d)\n", + tx_dma_q->udma->name, tx_dma_q->qid, tx_descs); + + return tx_descs; +} + + +void al_eth_tx_dma_action(struct al_udma_q *tx_dma_q, uint32_t tx_descs) +{ + /* add tx descriptors */ + al_udma_desc_action_add(tx_dma_q, tx_descs); +} + +/** + * get number of completed tx descriptors, upper layer should derive from + */ +int al_eth_comp_tx_get(struct al_udma_q *tx_dma_q) +{ + int rc; + + rc = al_udma_cdesc_get_all(tx_dma_q, NULL); + if (rc != 0) { + al_udma_cdesc_ack(tx_dma_q, rc); + al_dbg("[%s %d]: tx completion: descs (%d)\n", + tx_dma_q->udma->name, tx_dma_q->qid, rc); + } + + return rc; +} + +/** + * configure the TSO MSS val + */ +int al_eth_tso_mss_config(struct al_hal_eth_adapter *adapter, uint8_t idx, uint32_t mss_val) +{ + + al_assert(idx <= 8); /*valid MSS index*/ + al_assert(mss_val <= AL_ETH_TSO_MSS_MAX_VAL); /*valid MSS val*/ + al_assert(mss_val >= AL_ETH_TSO_MSS_MIN_VAL); /*valid MSS val*/ + + al_reg_write32(&adapter->ec_regs_base->tso_sel[idx].mss, mss_val); + return 0; +} + + +/* RX */ +/** + * config the rx descriptor fields + */ +void al_eth_rx_desc_config( + struct al_hal_eth_adapter *adapter, + enum al_eth_rx_desc_lro_context_val_res lro_sel, + enum al_eth_rx_desc_l4_offset_sel l4_offset_sel, + enum al_eth_rx_desc_l3_offset_sel l3_offset_sel, + enum al_eth_rx_desc_l4_chk_res_sel l4_sel, + enum al_eth_rx_desc_l3_chk_res_sel l3_sel, + enum al_eth_rx_desc_l3_proto_idx_sel l3_proto_sel, + enum al_eth_rx_desc_l4_proto_idx_sel l4_proto_sel, + enum al_eth_rx_desc_frag_sel frag_sel) +{ + uint32_t reg_val = 0; + + reg_val |= (lro_sel == AL_ETH_L4_OFFSET) ? + EC_RFW_CFG_A_0_LRO_CONTEXT_SEL : 0; + + reg_val |= (l4_sel == AL_ETH_L4_INNER_OUTER_CHK) ? + EC_RFW_CFG_A_0_META_L4_CHK_RES_SEL : 0; + + reg_val |= l3_sel << EC_RFW_CFG_A_0_META_L3_CHK_RES_SEL_SHIFT; + + al_reg_write32(&adapter->ec_regs_base->rfw.cfg_a_0, reg_val); + + reg_val = al_reg_read32(&adapter->ec_regs_base->rfw.meta); + if (l3_proto_sel == AL_ETH_L3_PROTO_IDX_INNER) + reg_val |= EC_RFW_META_L3_PROT_SEL; + else + reg_val &= ~EC_RFW_META_L3_PROT_SEL; + + if (l4_proto_sel == AL_ETH_L4_PROTO_IDX_INNER) + reg_val |= EC_RFW_META_L4_PROT_SEL; + else + reg_val &= ~EC_RFW_META_L4_PROT_SEL; + + if (l4_offset_sel == AL_ETH_L4_OFFSET_INNER) + reg_val |= EC_RFW_META_L4_OFFSET_SEL; + else + reg_val &= ~EC_RFW_META_L4_OFFSET_SEL; + + if (l3_offset_sel == AL_ETH_L3_OFFSET_INNER) + reg_val |= EC_RFW_META_L3_OFFSET_SEL; + else + reg_val &= ~EC_RFW_META_L3_OFFSET_SEL; + + if (frag_sel == AL_ETH_FRAG_INNER) + reg_val |= EC_RFW_META_FRAG_SEL; + else + reg_val &= ~EC_RFW_META_FRAG_SEL; + + + al_reg_write32(&adapter->ec_regs_base->rfw.meta, reg_val); +} + +/** + * Configure RX header split + */ +int al_eth_rx_header_split_config(struct al_hal_eth_adapter *adapter, al_bool enable, uint32_t header_len) +{ + uint32_t reg; + + if (adapter->rev_id < AL_ETH_REV_ID_1) { + al_err("[%s]: header split feature not supported by this revision\n", adapter->name); + return -EINVAL; + } + reg = al_reg_read32(&adapter->ec_regs_base->rfw.hdr_split); + if (enable == AL_TRUE) + reg |= EC_RFW_HDR_SPLIT_EN; + else + reg &= ~EC_RFW_HDR_SPLIT_EN; + + AL_REG_FIELD_SET(reg, EC_RFW_HDR_SPLIT_DEF_LEN_MASK, EC_RFW_HDR_SPLIT_DEF_LEN_SHIFT, header_len); + al_reg_write32(&adapter->ec_regs_base->rfw.hdr_split, reg); + return 0; +} + + +/** + * add buffer to receive queue + */ +int al_eth_rx_buffer_add(struct al_udma_q *rx_dma_q, + struct al_buf *buf, uint32_t flags, + struct al_buf *header_buf) +{ + uint64_t vmid = ((uint64_t)flags & 
AL_ETH_RX_FLAGS_VMID_MASK) <<
+			AL_UDMA_DESC_VMID_SHIFT;
+	uint32_t flags_len = flags & ~AL_ETH_RX_FLAGS_VMID_MASK;
+	union al_udma_desc *rx_desc;
+
+	al_dbg("[%s %d]: add rx buffer.\n", rx_dma_q->udma->name, rx_dma_q->qid);
+
+#if 1
+	if (unlikely(al_udma_available_get(rx_dma_q) < 1)) {
+		al_dbg("[%s]: rx q (%d) does not have enough free descriptors",
+			rx_dma_q->udma->name, rx_dma_q->qid);
+		return -ENOSPC;
+	}
+#endif
+	rx_desc = al_udma_desc_get(rx_dma_q);
+
+	flags_len |= al_udma_ring_id_get(rx_dma_q) << AL_S2M_DESC_RING_ID_SHIFT;
+	flags_len |= buf->len & AL_S2M_DESC_LEN_MASK;
+
+	if (flags & AL_ETH_RX_FLAGS_DUAL_BUF) {
+		al_assert(header_buf != NULL); /*header valid in dual buf */
+		al_assert(AL_ADDR_HIGH(buf->addr) == AL_ADDR_HIGH(header_buf->addr)); /* high bits of addresses must be the same */
+
+		flags_len |= (header_buf->len << AL_S2M_DESC_LEN2_SHIFT) &
+				AL_S2M_DESC_LEN2_MASK;
+		rx_desc->rx.buf2_ptr_lo = swap32_to_le(AL_ADDR_LOW(header_buf->addr));
+	}
+	rx_desc->rx.len_ctrl = swap32_to_le(flags_len);
+	rx_desc->rx.buf1_ptr = swap64_to_le(buf->addr | vmid);
+
+	return 0;
+}
+
+/**
+ * notify the hw engine about rx descriptors that were added to the receive queue
+ */
+void al_eth_rx_buffer_action(struct al_udma_q *rx_dma_q, uint32_t descs_num)
+{
+	al_dbg("[%s]: update the rx engine tail pointer: queue %d. descs %d\n",
+		rx_dma_q->udma->name, rx_dma_q->qid, descs_num);
+
+	/* add rx descriptor */
+	al_udma_desc_action_add(rx_dma_q, descs_num);
+}
+
+/**
+ * get packet from RX completion ring
+ */
+uint32_t al_eth_pkt_rx(struct al_udma_q *rx_dma_q, struct al_eth_pkt *pkt)
+{
+	volatile union al_udma_cdesc *cdesc;
+	al_eth_rx_cdesc *rx_desc;
+	uint32_t i;
+	uint32_t rc;
+
+	rc = al_udma_cdesc_packet_get(rx_dma_q, &cdesc);
+	if (rc == 0)
+		return 0;
+
+	al_assert(rc <= AL_ETH_PKT_MAX_BUFS);
+
+	al_dbg("[%s]: fetch rx packet: queue %d.\n",
+		rx_dma_q->udma->name, rx_dma_q->qid);
+
+	for (i = 0; i < rc; i++) {
+		uint32_t len;
+
+		/* get next descriptor */
+		rx_desc = (al_eth_rx_cdesc *)al_cdesc_next(rx_dma_q, cdesc, i);
+
+		len = swap32_from_le(rx_desc->len);
+
+		if ((i == 0) && (swap32_from_le(rx_desc->word2) & AL_UDMA_CDESC_BUF2_USED))
+			pkt->rx_header_len = (len & AL_S2M_DESC_LEN2_MASK) >>
+					AL_S2M_DESC_LEN2_SHIFT;
+
+		pkt->bufs[i].len = len & AL_S2M_DESC_LEN_MASK;
+	}
+	/* get flags from last desc */
+	pkt->flags = swap32_from_le(rx_desc->ctrl_meta);
+#ifdef AL_ETH_RX_DESC_RAW_GET
+	pkt->rx_desc_raw[0] = pkt->flags;
+	pkt->rx_desc_raw[1] = swap32_from_le(rx_desc->len);
+	pkt->rx_desc_raw[2] = swap32_from_le(rx_desc->word2);
+	pkt->rx_desc_raw[3] = swap32_from_le(rx_desc->word3);
+#endif
+	/* update L3/L4 proto index */
+	pkt->l3_proto_idx = pkt->flags & AL_ETH_RX_L3_PROTO_IDX_MASK;
+	pkt->l4_proto_idx = (pkt->flags >> AL_ETH_RX_L4_PROTO_IDX_SHIFT) & AL_ETH_RX_L4_PROTO_IDX_MASK;
+	pkt->rxhash = (swap32_from_le(rx_desc->len) & AL_ETH_RX_HASH_MASK) >>
+			AL_ETH_RX_HASH_SHIFT;
+	pkt->l3_offset = (swap32_from_le(rx_desc->word2) & AL_ETH_RX_L3_OFFSET_MASK) >> AL_ETH_RX_L3_OFFSET_SHIFT;
+
+#ifdef AL_ETH_SUPPORT_MACSEC
+	/* update MacSec related info */
+	pkt->macsec_rx_flags = (swap32_from_le(rx_desc->word3) & AL_ETH_MACSEC_RX_FLAGS_MASK) >>
+				AL_ETH_MACSEC_RX_FLAGS_LSB_SHIFT;
+	if (pkt->macsec_rx_flags & AL_ETH_MACSEC_RX_FLAGS_IS_MACSEC) {
+		pkt->macsec_association_number = (pkt->macsec_rx_flags & AL_ETH_MACSEC_RX_FLAGS_AN_MASK) >>
+						AL_ETH_MACSEC_RX_FLAGS_AN_LSB_SHIFT;
+		pkt->macsec_secured_pyld_len = (pkt->macsec_rx_flags & AL_ETH_MACSEC_RX_FLAGS_SL_MASK) >>
AL_ETH_MACSEC_RX_FLAGS_SL_LSB_SHIFT; + pkt->macsec_encrypt = (pkt->macsec_rx_flags & AL_ETH_MACSEC_RX_FLAGS_IS_ENCRYPTED_MASK) >> + AL_ETH_MACSEC_RX_FLAGS_IS_ENCRYPTED_SHIFT; + pkt->macsec_sign = (pkt->macsec_rx_flags & AL_ETH_MACSEC_RX_FLAGS_IS_SIGNED_MASK) >> + AL_ETH_MACSEC_RX_FLAGS_IS_SIGNED_SHIFT; + } +#endif + + al_udma_cdesc_ack(rx_dma_q, rc); + return rc; +} + + +int al_eth_rx_parser_entry_update(struct al_hal_eth_adapter *adapter, uint32_t idx, + struct al_eth_epe_p_reg_entry *reg_entry, + struct al_eth_epe_control_entry *control_entry) +{ + al_eth_epe_entry_set(adapter, idx, reg_entry, control_entry); + return 0; +} + +#define AL_ETH_THASH_UDMA_SHIFT 0 +#define AL_ETH_THASH_UDMA_MASK (0xF << AL_ETH_THASH_UDMA_SHIFT) + +#define AL_ETH_THASH_Q_SHIFT 4 +#define AL_ETH_THASH_Q_MASK (0x3 << AL_ETH_THASH_Q_SHIFT) + +int al_eth_thash_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t udma, uint32_t queue) +{ + uint32_t entry; + al_assert(idx < AL_ETH_RX_THASH_TABLE_SIZE); /*valid THASH index*/ + + entry = (udma << AL_ETH_THASH_UDMA_SHIFT) & AL_ETH_THASH_UDMA_MASK; + entry |= (queue << AL_ETH_THASH_Q_SHIFT) & AL_ETH_THASH_Q_MASK; + + al_reg_write32(&adapter->ec_regs_base->rfw.thash_table_addr, idx); + al_reg_write32(&adapter->ec_regs_base->rfw.thash_table_data, entry); + return 0; +} + +int al_eth_fsm_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint32_t entry) +{ + + al_assert(idx < AL_ETH_RX_FSM_TABLE_SIZE); /*valid FSM index*/ + + + al_reg_write32(&adapter->ec_regs_base->rfw.fsm_table_addr, idx); + al_reg_write32(&adapter->ec_regs_base->rfw.fsm_table_data, entry); + return 0; +} + +static uint32_t al_eth_fwd_ctrl_entry_to_val(struct al_eth_fwd_ctrl_table_entry *entry) +{ + uint32_t val = 0; + AL_REG_FIELD_SET(val, AL_FIELD_MASK(3,0), 0, entry->prio_sel); + AL_REG_FIELD_SET(val, AL_FIELD_MASK(7,4), 4, entry->queue_sel_1); + AL_REG_FIELD_SET(val, AL_FIELD_MASK(9,8), 8, entry->queue_sel_2); + AL_REG_FIELD_SET(val, AL_FIELD_MASK(13,10), 10, entry->udma_sel); + AL_REG_FIELD_SET(val, AL_FIELD_MASK(18,15), 15, entry->hdr_split_len_sel); + AL_REG_BIT_VAL_SET(val, 19, !!(entry->filter == AL_TRUE)); + + return val; +} + +static int al_eth_ctrl_index_match(struct al_eth_fwd_ctrl_table_index *index, uint32_t i) { + if ((index->vlan_table_out != AL_ETH_FWD_CTRL_IDX_VLAN_TABLE_OUT_ANY) + && (index->vlan_table_out != AL_REG_BIT_GET(i, 0))) + return 0; + if ((index->tunnel_exist != AL_ETH_FWD_CTRL_IDX_TUNNEL_ANY) + && (index->tunnel_exist != AL_REG_BIT_GET(i, 1))) + return 0; + if ((index->vlan_exist != AL_ETH_FWD_CTRL_IDX_VLAN_ANY) + && (index->vlan_exist != AL_REG_BIT_GET(i, 2))) + return 0; + if ((index->mac_table_match != AL_ETH_FWD_CTRL_IDX_MAC_TABLE_ANY) + && (index->mac_table_match != AL_REG_BIT_GET(i, 3))) + return 0; + if ((index->protocol_id != AL_ETH_PROTO_ID_ANY) + && (index->protocol_id != AL_REG_FIELD_GET(i, AL_FIELD_MASK(8,4),4))) + return 0; + if ((index->mac_type != AL_ETH_FWD_CTRL_IDX_MAC_DA_TYPE_ANY) + && (index->mac_type != AL_REG_FIELD_GET(i, AL_FIELD_MASK(10,9),9))) + return 0; + return 1; +} + +int al_eth_ctrl_table_set(struct al_hal_eth_adapter *adapter, + struct al_eth_fwd_ctrl_table_index *index, + struct al_eth_fwd_ctrl_table_entry *entry) +{ + uint32_t val = al_eth_fwd_ctrl_entry_to_val(entry); + uint32_t i; + + for (i = 0; i < AL_ETH_RX_CTRL_TABLE_SIZE; i++) { + if (al_eth_ctrl_index_match(index, i)) { + al_reg_write32(&adapter->ec_regs_base->rfw.ctrl_table_addr, i); + al_reg_write32(&adapter->ec_regs_base->rfw.ctrl_table_data, val); + } + 
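+		/*
+		 * ctrl_table_addr/ctrl_table_data form an indirect address/data
+		 * pair: the address write selects the table row that the
+		 * following data write updates, so each index matched by
+		 * al_eth_ctrl_index_match() is programmed one row at a time.
+		 */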
} + return 0; +} + +int al_eth_ctrl_table_def_set(struct al_hal_eth_adapter *adapter, + al_bool use_table, + struct al_eth_fwd_ctrl_table_entry *entry) +{ + uint32_t val = al_eth_fwd_ctrl_entry_to_val(entry); + + if (use_table) + val |= EC_RFW_CTRL_TABLE_DEF_SEL; + + al_reg_write32(&adapter->ec_regs_base->rfw.ctrl_table_def, val); + + return 0; +} + +int al_eth_ctrl_table_raw_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint32_t entry) +{ + + al_assert(idx < AL_ETH_RX_CTRL_TABLE_SIZE); /* valid CTRL index */ + + + al_reg_write32(&adapter->ec_regs_base->rfw.ctrl_table_addr, idx); + al_reg_write32(&adapter->ec_regs_base->rfw.ctrl_table_data, entry); + return 0; +} + +int al_eth_ctrl_table_def_raw_set(struct al_hal_eth_adapter *adapter, uint32_t val) +{ + al_reg_write32(&adapter->ec_regs_base->rfw.ctrl_table_def, val); + + return 0; +} + +int al_eth_hash_key_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint32_t val) +{ + + al_assert(idx < AL_ETH_RX_HASH_KEY_NUM); /*valid CTRL index*/ + + al_reg_write32(&adapter->ec_regs_base->rfw_hash[idx].key, val); + + return 0; +} + +static uint32_t al_eth_fwd_mac_table_entry_to_val(struct al_eth_fwd_mac_table_entry *entry) +{ + uint32_t val = 0; + + val |= (entry->filter == AL_TRUE) ? EC_FWD_MAC_CTRL_RX_VAL_DROP : 0; + val |= ((entry->udma_mask << EC_FWD_MAC_CTRL_RX_VAL_UDMA_SHIFT) & + EC_FWD_MAC_CTRL_RX_VAL_UDMA_MASK); + + val |= ((entry->qid << EC_FWD_MAC_CTRL_RX_VAL_QID_SHIFT) & + EC_FWD_MAC_CTRL_RX_VAL_QID_MASK); + + val |= (entry->rx_valid == AL_TRUE) ? EC_FWD_MAC_CTRL_RX_VALID : 0; + + val |= ((entry->tx_target << EC_FWD_MAC_CTRL_TX_VAL_SHIFT) & + EC_FWD_MAC_CTRL_TX_VAL_MASK); + + val |= (entry->tx_valid == AL_TRUE) ? EC_FWD_MAC_CTRL_TX_VALID : 0; + + return val; +} + +int al_eth_fwd_mac_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, + struct al_eth_fwd_mac_table_entry *entry) +{ + uint32_t val; + + al_assert(idx < AL_ETH_FWD_MAC_NUM); /*valid FWD MAC index */ + + val = (entry->addr[2] << 24) | (entry->addr[3] << 16) | + (entry->addr[4] << 8) | entry->addr[5]; + al_reg_write32(&adapter->ec_regs_base->fwd_mac[idx].data_l, val); + val = (entry->addr[0] << 8) | entry->addr[1]; + al_reg_write32(&adapter->ec_regs_base->fwd_mac[idx].data_h, val); + val = (entry->mask[2] << 24) | (entry->mask[3] << 16) | + (entry->mask[4] << 8) | entry->mask[5]; + al_reg_write32(&adapter->ec_regs_base->fwd_mac[idx].mask_l, val); + val = (entry->mask[0] << 8) | entry->mask[1]; + al_reg_write32(&adapter->ec_regs_base->fwd_mac[idx].mask_h, val); + + val = al_eth_fwd_mac_table_entry_to_val(entry); + al_reg_write32(&adapter->ec_regs_base->fwd_mac[idx].ctrl, val); + return 0; +} + + + +int al_eth_fwd_mac_addr_raw_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint32_t addr_lo, uint32_t addr_hi, uint32_t mask_lo, uint32_t mask_hi) +{ + al_assert(idx < AL_ETH_FWD_MAC_NUM); /*valid FWD MAC index */ + + al_reg_write32(&adapter->ec_regs_base->fwd_mac[idx].data_l, addr_lo); + al_reg_write32(&adapter->ec_regs_base->fwd_mac[idx].data_h, addr_hi); + al_reg_write32(&adapter->ec_regs_base->fwd_mac[idx].mask_l, mask_lo); + al_reg_write32(&adapter->ec_regs_base->fwd_mac[idx].mask_h, mask_hi); + + return 0; +} + +int al_eth_fwd_mac_ctrl_raw_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint32_t ctrl) +{ + al_assert(idx < AL_ETH_FWD_MAC_NUM); /*valid FWD MAC index */ + + al_reg_write32(&adapter->ec_regs_base->fwd_mac[idx].ctrl, ctrl); + + return 0; +} + +int al_eth_mac_addr_store(void * __iomem ec_base, uint32_t idx, uint8_t *addr) +{ + struct 
al_ec_regs __iomem *ec_regs_base = (struct al_ec_regs __iomem*)ec_base; + uint32_t val; + + al_assert(idx < AL_ETH_FWD_MAC_NUM); /*valid FWD MAC index */ + + val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]; + al_reg_write32(&ec_regs_base->fwd_mac[idx].data_l, val); + val = (addr[0] << 8) | addr[1]; + al_reg_write32(&ec_regs_base->fwd_mac[idx].data_h, val); + return 0; +} + +int al_eth_mac_addr_read(void * __iomem ec_base, uint32_t idx, uint8_t *addr) +{ + struct al_ec_regs __iomem *ec_regs_base = (struct al_ec_regs __iomem*)ec_base; + uint32_t addr_lo = al_reg_read32(&ec_regs_base->fwd_mac[idx].data_l); + uint16_t addr_hi = al_reg_read32(&ec_regs_base->fwd_mac[idx].data_h); + + addr[5] = addr_lo & 0xff; + addr[4] = (addr_lo >> 8) & 0xff; + addr[3] = (addr_lo >> 16) & 0xff; + addr[2] = (addr_lo >> 24) & 0xff; + addr[1] = addr_hi & 0xff; + addr[0] = (addr_hi >> 8) & 0xff; + return 0; +} + +int al_eth_fwd_mhash_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t udma_mask, uint8_t qid) +{ + uint32_t val = 0; + al_assert(idx < AL_ETH_FWD_MAC_HASH_NUM); /* valid MHASH index */ + + AL_REG_FIELD_SET(val, AL_FIELD_MASK(3,0), 0, udma_mask); + AL_REG_FIELD_SET(val, AL_FIELD_MASK(5,4), 4, qid); + + al_reg_write32(&adapter->ec_regs_base->rfw.mhash_table_addr, idx); + al_reg_write32(&adapter->ec_regs_base->rfw.mhash_table_data, val); + return 0; +} +static uint32_t al_eth_fwd_vid_entry_to_val(struct al_eth_fwd_vid_table_entry *entry) +{ + uint32_t val = 0; + AL_REG_BIT_VAL_SET(val, 0, entry->control); + AL_REG_BIT_VAL_SET(val, 1, entry->filter); + AL_REG_FIELD_SET(val, AL_FIELD_MASK(5,2), 2, entry->udma_mask); + + return val; +} + +int al_eth_fwd_vid_config_set(struct al_hal_eth_adapter *adapter, al_bool use_table, + struct al_eth_fwd_vid_table_entry *default_entry, + uint32_t default_vlan) +{ + uint32_t reg; + + reg = al_eth_fwd_vid_entry_to_val(default_entry); + if (use_table) + reg |= EC_RFW_VID_TABLE_DEF_SEL; + else + reg &= ~EC_RFW_VID_TABLE_DEF_SEL; + al_reg_write32(&adapter->ec_regs_base->rfw.vid_table_def, reg); + al_reg_write32(&adapter->ec_regs_base->rfw.default_vlan, default_vlan); + + return 0; +} + +int al_eth_fwd_vid_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, + struct al_eth_fwd_vid_table_entry *entry) +{ + uint32_t val; + al_assert(idx < AL_ETH_FWD_VID_TABLE_NUM); /* valid VID index */ + + val = al_eth_fwd_vid_entry_to_val(entry); + al_reg_write32(&adapter->ec_regs_base->rfw.vid_table_addr, idx); + al_reg_write32(&adapter->ec_regs_base->rfw.vid_table_data, val); + return 0; +} + +int al_eth_fwd_pbits_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t prio) +{ + + al_assert(idx < AL_ETH_FWD_PBITS_TABLE_NUM); /* valid PBIT index */ + al_assert(prio < AL_ETH_FWD_PRIO_TABLE_NUM); /* valid PRIO index */ + al_reg_write32(&adapter->ec_regs_base->rfw.pbits_table_addr, idx); + al_reg_write32(&adapter->ec_regs_base->rfw.pbits_table_data, prio); + return 0; +} + +int al_eth_fwd_priority_table_set(struct al_hal_eth_adapter *adapter, uint8_t prio, uint8_t qid) +{ + al_assert(prio < AL_ETH_FWD_PRIO_TABLE_NUM); /* valid PRIO index */ + + al_reg_write32(&adapter->ec_regs_base->rfw_priority[prio].queue, qid); + return 0; +} + + +int al_eth_fwd_dscp_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t prio) +{ + + al_assert(idx < AL_ETH_FWD_DSCP_TABLE_NUM); /* valid DSCP index */ + + + al_reg_write32(&adapter->ec_regs_base->rfw.dscp_table_addr, idx); + al_reg_write32(&adapter->ec_regs_base->rfw.dscp_table_data, prio); + 
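+	/* Same indirect address/data scheme as the other rfw tables: the data
+	 * write updates the DSCP table row selected by the preceding address
+	 * write. */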
return 0; +} + +int al_eth_fwd_tc_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t prio) +{ + + al_assert(idx < AL_ETH_FWD_TC_TABLE_NUM); /* valid TC index */ + + + al_reg_write32(&adapter->ec_regs_base->rfw.tc_table_addr, idx); + al_reg_write32(&adapter->ec_regs_base->rfw.tc_table_data, prio); + return 0; +} + +/** Configure default UDMA register */ +int al_eth_fwd_default_udma_config(struct al_hal_eth_adapter *adapter, uint32_t idx, + uint8_t udma_mask) +{ + al_reg_write32_masked(&adapter->ec_regs_base->rfw_default[idx].opt_1, + EC_RFW_DEFAULT_OPT_1_UDMA_MASK, + udma_mask << EC_RFW_DEFAULT_OPT_1_UDMA_SHIFT); + return 0; +} + +/** Configure default queue register */ +int al_eth_fwd_default_queue_config(struct al_hal_eth_adapter *adapter, uint32_t idx, + uint8_t qid) +{ + al_reg_write32_masked(&adapter->ec_regs_base->rfw_default[idx].opt_1, + EC_RFW_DEFAULT_OPT_1_QUEUE_MASK, + qid << EC_RFW_DEFAULT_OPT_1_QUEUE_SHIFT); + return 0; +} + +/** Configure default priority register */ +int al_eth_fwd_default_priority_config(struct al_hal_eth_adapter *adapter, uint32_t idx, + uint8_t prio) +{ + al_reg_write32_masked(&adapter->ec_regs_base->rfw_default[idx].opt_1, + EC_RFW_DEFAULT_OPT_1_PRIORITY_MASK, + prio << EC_RFW_DEFAULT_OPT_1_PRIORITY_SHIFT); + return 0; +} + +int al_eth_switching_config_set(struct al_hal_eth_adapter *adapter, uint8_t udma_id, uint8_t forward_all_to_mac, uint8_t enable_int_switching, + enum al_eth_tx_switch_vid_sel_type vid_sel_type, + enum al_eth_tx_switch_dec_type uc_dec, + enum al_eth_tx_switch_dec_type mc_dec, + enum al_eth_tx_switch_dec_type bc_dec) +{ + uint32_t reg; + + if (udma_id == 0) { + reg = al_reg_read32(&adapter->ec_regs_base->tfw.tx_gen); + if (forward_all_to_mac) + reg |= EC_TFW_TX_GEN_FWD_ALL_TO_MAC; + else + reg &= ~EC_TFW_TX_GEN_FWD_ALL_TO_MAC; + al_reg_write32(&adapter->ec_regs_base->tfw.tx_gen, reg); + } + + reg = enable_int_switching; + reg |= (vid_sel_type & 7) << 1; + reg |= (bc_dec & 3) << 4; + reg |= (mc_dec & 3) << 6; + reg |= (uc_dec & 3) << 8; + al_reg_write32(&adapter->ec_regs_base->tfw_udma[udma_id].fwd_dec, reg); + + return 0; +} + +#define AL_ETH_RFW_FILTER_SUPPORTED(rev_id) \ + (AL_ETH_RFW_FILTER_UNDET_MAC | \ + AL_ETH_RFW_FILTER_DET_MAC | \ + AL_ETH_RFW_FILTER_TAGGED | \ + AL_ETH_RFW_FILTER_UNTAGGED | \ + AL_ETH_RFW_FILTER_BC | \ + AL_ETH_RFW_FILTER_MC | \ + AL_ETH_RFW_FILTER_VLAN_VID | \ + AL_ETH_RFW_FILTER_CTRL_TABLE | \ + AL_ETH_RFW_FILTER_PROT_INDEX | \ + ((rev_id > AL_ETH_REV_ID_0) ? 
((AL_ETH_RFW_FILTER_WOL) | (AL_ETH_RFW_FILTER_PARSE)) : 0)) + +/* Configure the receive filters */ +int al_eth_filter_config(struct al_hal_eth_adapter *adapter, struct al_eth_filter_params *params) +{ + uint32_t reg; + + al_assert(params); /* valid params pointer */ + + if (params->filters & ~(AL_ETH_RFW_FILTER_SUPPORTED(adapter->rev_id))) { + al_err("[%s]: unsupported filter options (0x%08x)\n", adapter->name, params->filters); + return -EINVAL; + } + + reg = al_reg_read32(&adapter->ec_regs_base->rfw.out_cfg); + if (params->enable == AL_TRUE) + AL_REG_MASK_SET(reg, EC_RFW_OUT_CFG_DROP_EN); + else + AL_REG_MASK_CLEAR(reg, EC_RFW_OUT_CFG_DROP_EN); + al_reg_write32(&adapter->ec_regs_base->rfw.out_cfg, reg); + + al_reg_write32_masked( + &adapter->ec_regs_base->rfw.filter, + AL_ETH_RFW_FILTER_SUPPORTED(adapter->rev_id), + params->filters); + if (params->filters & AL_ETH_RFW_FILTER_PROT_INDEX) { + int i; + for (i = 0; i < AL_ETH_PROTOCOLS_NUM; i++) { + reg = al_reg_read32(&adapter->ec_regs_base->epe_a[i].prot_act); + if (params->filter_proto[i] == AL_TRUE) + AL_REG_MASK_SET(reg, EC_EPE_A_PROT_ACT_DROP); + else + AL_REG_MASK_CLEAR(reg, EC_EPE_A_PROT_ACT_DROP); + al_reg_write32(&adapter->ec_regs_base->epe_a[i].prot_act, reg); + } + } + return 0; +} + +/* Configure the receive override filters */ +int al_eth_filter_override_config(struct al_hal_eth_adapter *adapter, + struct al_eth_filter_override_params *params) +{ + uint32_t reg; + + al_assert(params); /* valid params pointer */ + + if (params->filters & ~(AL_ETH_RFW_FILTER_SUPPORTED(adapter->rev_id))) { + al_err("[%s]: unsupported override filter options (0x%08x)\n", adapter->name, params->filters); + return -EINVAL; + } + + al_reg_write32_masked( + &adapter->ec_regs_base->rfw.filter, + AL_ETH_RFW_FILTER_SUPPORTED(adapter->rev_id) << 16, + params->filters << 16); + + reg = al_reg_read32(&adapter->ec_regs_base->rfw.default_or); + AL_REG_FIELD_SET(reg, EC_RFW_DEFAULT_OR_UDMA_MASK, EC_RFW_DEFAULT_OR_UDMA_SHIFT, params->udma); + AL_REG_FIELD_SET(reg, EC_RFW_DEFAULT_OR_QUEUE_MASK, EC_RFW_DEFAULT_OR_QUEUE_SHIFT, params->qid); + al_reg_write32(&adapter->ec_regs_base->rfw.default_or, reg); + return 0; +} + + + +int al_eth_switching_default_bitmap_set(struct al_hal_eth_adapter *adapter, uint8_t udma_id, uint8_t udma_uc_bitmask, + uint8_t udma_mc_bitmask,uint8_t udma_bc_bitmask) +{ + al_reg_write32(&adapter->ec_regs_base->tfw_udma[udma_id].uc_udma, udma_uc_bitmask); + al_reg_write32(&adapter->ec_regs_base->tfw_udma[udma_id].mc_udma, udma_mc_bitmask); + al_reg_write32(&adapter->ec_regs_base->tfw_udma[udma_id].bc_udma, udma_bc_bitmask); + + return 0; +} + +int al_eth_flow_control_config(struct al_hal_eth_adapter *adapter, struct al_eth_flow_control_params *params) +{ + uint32_t reg; + int i; + al_assert(params); /* valid params pointer */ + + switch(params->type){ + case AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE: + al_dbg("[%s]: config flow control to link pause mode.\n", adapter->name); + + /* config the mac */ + if (AL_ETH_IS_1G_MAC(adapter->mac_mode)) { + /* set quanta value */ + al_reg_write32( + &adapter->mac_regs_base->mac_1g.pause_quant, + params->quanta); + al_reg_write32( + &adapter->ec_regs_base->efc.xoff_timer_1g, + params->quanta_th); + + } else if (AL_ETH_IS_10G_MAC(adapter->mac_mode) || AL_ETH_IS_25G_MAC(adapter->mac_mode)) { + /* set quanta value */ + al_reg_write32( + &adapter->mac_regs_base->mac_10g.cl01_pause_quanta, + params->quanta); + /* set quanta threshold value */ + al_reg_write32( + &adapter->mac_regs_base->mac_10g.cl01_quanta_thresh, + 
params->quanta_th); + } else { + /* set quanta value */ + al_eth_40g_mac_reg_write(adapter, + ETH_MAC_GEN_V3_MAC_40G_CL01_PAUSE_QUANTA_ADDR, + params->quanta); + /* set quanta threshold value */ + al_eth_40g_mac_reg_write(adapter, + ETH_MAC_GEN_V3_MAC_40G_CL01_QUANTA_THRESH_ADDR, + params->quanta_th); + } + + if (params->obay_enable == AL_TRUE) + /* Tx path FIFO, unmask pause_on from MAC when PAUSE packet received */ + al_reg_write32(&adapter->ec_regs_base->efc.ec_pause, 1); + else + al_reg_write32(&adapter->ec_regs_base->efc.ec_pause, 0); + + + /* Rx path */ + if (params->gen_enable == AL_TRUE) + /* enable generating xoff from ec fifo almost full indication in hysteresis mode */ + al_reg_write32(&adapter->ec_regs_base->efc.ec_xoff, 1 << EC_EFC_EC_XOFF_MASK_2_SHIFT); + else + al_reg_write32(&adapter->ec_regs_base->efc.ec_xoff, 0); + + if (AL_ETH_IS_1G_MAC(adapter->mac_mode)) + /* in 1G mode, enable generating xon from ec fifo in hysteresis mode*/ + al_reg_write32(&adapter->ec_regs_base->efc.xon, EC_EFC_XON_MASK_2 | EC_EFC_XON_MASK_1); + + /* set hysteresis mode thresholds */ + al_reg_write32(&adapter->ec_regs_base->efc.rx_fifo_hyst, params->rx_fifo_th_low | (params->rx_fifo_th_high << EC_EFC_RX_FIFO_HYST_TH_HIGH_SHIFT)); + + for (i = 0; i < 4; i++) { + if (params->obay_enable == AL_TRUE) + /* Tx path UDMA, unmask pause_on for all queues */ + al_reg_write32(&adapter->ec_regs_base->fc_udma[i].q_pause_0, + params->prio_q_map[i][0]); + else + al_reg_write32(&adapter->ec_regs_base->fc_udma[i].q_pause_0, 0); + + if (params->gen_enable == AL_TRUE) + /* Rx path UDMA, enable generating xoff from UDMA queue almost full indication */ + al_reg_write32(&adapter->ec_regs_base->fc_udma[i].q_xoff_0, params->prio_q_map[i][0]); + else + al_reg_write32(&adapter->ec_regs_base->fc_udma[i].q_xoff_0, 0); + } + break; + case AL_ETH_FLOW_CONTROL_TYPE_PFC: + al_dbg("[%s]: config flow control to PFC mode.\n", adapter->name); + al_assert(!AL_ETH_IS_1G_MAC(adapter->mac_mode)); /* pfc not available for RGMII mode */; + + for (i = 0; i < 4; i++) { + int prio; + for (prio = 0; prio < 8; prio++) { + if (params->obay_enable == AL_TRUE) + /* Tx path UDMA, unmask pause_on for all queues */ + al_reg_write32(&adapter->ec_regs_base->fc_udma[i].q_pause_0 + prio, + params->prio_q_map[i][prio]); + else + al_reg_write32(&adapter->ec_regs_base->fc_udma[i].q_pause_0 + prio, + 0); + + if (params->gen_enable == AL_TRUE) + al_reg_write32(&adapter->ec_regs_base->fc_udma[i].q_xoff_0 + prio, + params->prio_q_map[i][prio]); + else + al_reg_write32(&adapter->ec_regs_base->fc_udma[i].q_xoff_0 + prio, + 0); + } + } + + /* Rx path */ + /* enable generating xoff from ec fifo almost full indication in hysteresis mode */ + if (params->gen_enable == AL_TRUE) + al_reg_write32(&adapter->ec_regs_base->efc.ec_xoff, 0xFF << EC_EFC_EC_XOFF_MASK_2_SHIFT); + else + al_reg_write32(&adapter->ec_regs_base->efc.ec_xoff, 0); + + /* set hysteresis mode thresholds */ + al_reg_write32(&adapter->ec_regs_base->efc.rx_fifo_hyst, params->rx_fifo_th_low | (params->rx_fifo_th_high << EC_EFC_RX_FIFO_HYST_TH_HIGH_SHIFT)); + + if (AL_ETH_IS_10G_MAC(adapter->mac_mode) || AL_ETH_IS_25G_MAC(adapter->mac_mode)) { + /* config the 10g_mac */ + /* set quanta value (same value for all prios) */ + reg = params->quanta | (params->quanta << 16); + al_reg_write32( + &adapter->mac_regs_base->mac_10g.cl01_pause_quanta, reg); + al_reg_write32( + &adapter->mac_regs_base->mac_10g.cl23_pause_quanta, reg); + al_reg_write32( + &adapter->mac_regs_base->mac_10g.cl45_pause_quanta, reg); + 
al_reg_write32( + &adapter->mac_regs_base->mac_10g.cl67_pause_quanta, reg); + /* set quanta threshold value (same value for all prios) */ + reg = params->quanta_th | (params->quanta_th << 16); + al_reg_write32( + &adapter->mac_regs_base->mac_10g.cl01_quanta_thresh, reg); + al_reg_write32( + &adapter->mac_regs_base->mac_10g.cl23_quanta_thresh, reg); + al_reg_write32( + &adapter->mac_regs_base->mac_10g.cl45_quanta_thresh, reg); + al_reg_write32( + &adapter->mac_regs_base->mac_10g.cl67_quanta_thresh, reg); + + /* enable PFC in the 10g_MAC */ + reg = al_reg_read32(&adapter->mac_regs_base->mac_10g.cmd_cfg); + reg |= 1 << 19; + al_reg_write32(&adapter->mac_regs_base->mac_10g.cmd_cfg, reg); + } else { + /* config the 40g_mac */ + /* set quanta value (same value for all prios) */ + reg = params->quanta | (params->quanta << 16); + al_eth_40g_mac_reg_write(adapter, + ETH_MAC_GEN_V3_MAC_40G_CL01_PAUSE_QUANTA_ADDR, reg); + al_eth_40g_mac_reg_write(adapter, + ETH_MAC_GEN_V3_MAC_40G_CL23_PAUSE_QUANTA_ADDR, reg); + al_eth_40g_mac_reg_write(adapter, + ETH_MAC_GEN_V3_MAC_40G_CL45_PAUSE_QUANTA_ADDR, reg); + al_eth_40g_mac_reg_write(adapter, + ETH_MAC_GEN_V3_MAC_40G_CL67_PAUSE_QUANTA_ADDR, reg); + /* set quanta threshold value (same value for all prios) */ + reg = params->quanta_th | (params->quanta_th << 16); + al_eth_40g_mac_reg_write(adapter, + ETH_MAC_GEN_V3_MAC_40G_CL01_QUANTA_THRESH_ADDR, reg); + al_eth_40g_mac_reg_write(adapter, + ETH_MAC_GEN_V3_MAC_40G_CL23_QUANTA_THRESH_ADDR, reg); + al_eth_40g_mac_reg_write(adapter, + ETH_MAC_GEN_V3_MAC_40G_CL45_QUANTA_THRESH_ADDR, reg); + al_eth_40g_mac_reg_write(adapter, + ETH_MAC_GEN_V3_MAC_40G_CL67_QUANTA_THRESH_ADDR, reg); + + /* enable PFC in the 40g_MAC */ + reg = al_reg_read32(&adapter->mac_regs_base->mac_10g.cmd_cfg); + reg |= 1 << 19; + al_reg_write32(&adapter->mac_regs_base->mac_10g.cmd_cfg, reg); + reg = al_eth_40g_mac_reg_read(adapter, ETH_MAC_GEN_V3_MAC_40G_COMMAND_CONFIG_ADDR); + + reg |= ETH_MAC_GEN_V3_MAC_40G_COMMAND_CONFIG_PFC_MODE; + + al_eth_40g_mac_reg_write(adapter, ETH_MAC_GEN_V3_MAC_40G_COMMAND_CONFIG_ADDR, reg); + } + + break; + default: + al_err("[%s]: unsupported flow control type %d\n", adapter->name, params->type); + return -EINVAL; + + } + return 0; +} + +int al_eth_vlan_mod_config(struct al_hal_eth_adapter *adapter, uint8_t udma_id, uint16_t udma_etype, uint16_t vlan1_data, uint16_t vlan2_data) +{ + al_dbg("[%s]: config vlan modification registers. udma id %d.\n", adapter->name, udma_id); + + al_reg_write32(&adapter->ec_regs_base->tpm_sel[udma_id].etype, udma_etype); + al_reg_write32(&adapter->ec_regs_base->tpm_udma[udma_id].vlan_data, vlan1_data | (vlan2_data << 16)); + + return 0; +} + +int al_eth_eee_get(struct al_hal_eth_adapter *adapter, struct al_eth_eee_params *params) +{ + uint32_t reg; + + al_dbg("[%s]: getting eee.\n", adapter->name); + + reg = al_reg_read32(&adapter->ec_regs_base->eee.cfg_e); + params->enable = (reg & EC_EEE_CFG_E_ENABLE) ? 
AL_TRUE : AL_FALSE;
+
+	params->tx_eee_timer = al_reg_read32(&adapter->ec_regs_base->eee.pre_cnt);
+	params->min_interval = al_reg_read32(&adapter->ec_regs_base->eee.post_cnt);
+	params->stop_cnt = al_reg_read32(&adapter->ec_regs_base->eee.stop_cnt);
+
+	return 0;
+}
+
+
+int al_eth_eee_config(struct al_hal_eth_adapter *adapter, struct al_eth_eee_params *params)
+{
+	uint32_t reg;
+	al_dbg("[%s]: config eee.\n", adapter->name);
+
+	if (params->enable == 0) {
+		al_dbg("[%s]: disable eee.\n", adapter->name);
+		al_reg_write32(&adapter->ec_regs_base->eee.cfg_e, 0);
+		return 0;
+	}
+
+
+	al_reg_write32(&adapter->ec_regs_base->eee.pre_cnt, params->tx_eee_timer);
+	al_reg_write32(&adapter->ec_regs_base->eee.post_cnt, params->min_interval);
+	al_reg_write32(&adapter->ec_regs_base->eee.stop_cnt, params->stop_cnt);
+
+	reg = EC_EEE_CFG_E_MASK_EC_TMI_STOP | EC_EEE_CFG_E_MASK_MAC_EEE |
+	      EC_EEE_CFG_E_ENABLE |
+	      EC_EEE_CFG_E_USE_EC_TX_FIFO | EC_EEE_CFG_E_USE_EC_RX_FIFO;
+
+	/*
+	 * Addressing RMN: 3732
+	 *
+	 * RMN description:
+	 * When the HW gets into EEE mode, it can't transmit any pause packet
+	 * (when a flow control policy is enabled).
+	 * In such a case, the HW has no way to handle extreme pushback from
+	 * the Rx_path fifos.
+	 *
+	 * Software flow:
+	 * Configure RX_FIFO empty as an EEE mode termination condition.
+	 * That way, nothing will prevent pause packet transmission in
+	 * case of extreme pushback from the Rx_path fifos.
+	 *
+	 */
+
+	al_reg_write32(&adapter->ec_regs_base->eee.cfg_e, reg);
+
+	return 0;
+}
+
+/* Timestamp */
+/* prepare the adapter for timestamping Rx packets. */
+int al_eth_ts_init(struct al_hal_eth_adapter *adapter)
+{
+	uint32_t reg;
+
+	/*TODO:
+	 * return error when:
+	 * - working in 1G mode and MACSEC enabled
+	 * - RX completion descriptor is not 8 words
+	 */
+	reg = al_reg_read32(&adapter->ec_regs_base->gen.en_ext);
+	if (AL_ETH_IS_1G_MAC(adapter->mac_mode))
+		reg &= ~EC_GEN_EN_EXT_PTH_1_10_SEL;
+	else
+		reg |= EC_GEN_EN_EXT_PTH_1_10_SEL;
+	/*
+	 * set completion bypass so tx timestamps won't be inserted to tx cmpl
+	 * (in order to disable unverified flow)
+	 */
+	reg |= EC_GEN_EN_EXT_PTH_COMPLETION_BYPASS;
+	al_reg_write32(&adapter->ec_regs_base->gen.en_ext, reg);
+
+	/*TODO: add the following when we have updated regs file:
+	 * reg_rfw_out_cfg_timestamp_sample_out
+	 *	0 (default) – use the timestamp from the SOP info (10G MAC)
+	 *	1 – use the timestamp from the EOP (1G MAC) (only when MACSEC is disabled)
+	 */
+	return 0;
+}
+
+/* read the Timestamp sample value of a previously transmitted packet. */
+int al_eth_tx_ts_val_get(struct al_hal_eth_adapter *adapter, uint8_t ts_index,
+			 uint32_t *timestamp)
+{
+	al_assert(ts_index < AL_ETH_PTH_TX_SAMPLES_NUM);
+
+	/* in 1G mode, only indices 1-7 are allowed */
+	if (AL_ETH_IS_1G_MAC(adapter->mac_mode)) {
+		al_assert(ts_index <= 7);
+		al_assert(ts_index >= 1);
+	}
+
+	/*TODO: check if sample is valid */
+	*timestamp = al_reg_read32(&adapter->ec_regs_base->pth_db[ts_index].ts);
+	return 0;
+}
+
+/* Read the systime value */
+int al_eth_pth_systime_read(struct al_hal_eth_adapter *adapter,
+			    struct al_eth_pth_time *systime)
+{
+	uint32_t reg;
+
+	/* first we must read the subseconds MSB so the seconds register will be
+	 * shadowed
+	 */
+	reg = al_reg_read32(&adapter->ec_regs_base->pth.system_time_subseconds_msb);
+	systime->femto = (uint64_t)reg << 18;
+	reg = al_reg_read32(&adapter->ec_regs_base->pth.system_time_seconds);
+	systime->seconds = reg;
+
+	return 0;
+}
+
+/* Set the clock period to a given value.
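+ *
+ * The period is wider than 18 bits, so it is split: bits 17:0 go into
+ * clock_period_lsb and the remaining bits into clock_period_msb.
+ * Assuming the period is expressed in femtoseconds, like the femto
+ * field of struct al_eth_pth_time, a 156.25 MHz PTP clock (6.4 ns
+ * period) would be programmed as
+ *	al_eth_pth_clk_period_write(adapter, 6400000);
+ * i.e. 0x1a800 into the lsb field and 24 into the msb register.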
*/ +int al_eth_pth_clk_period_write(struct al_hal_eth_adapter *adapter, + uint64_t clk_period) +{ + uint32_t reg; + /* first write the LSB so it will be shadowed */ + /* bits 31:14 of the clock period lsb register contains bits 17:0 of the + * period. + */ + reg = (clk_period & AL_BIT_MASK(18)) << EC_PTH_CLOCK_PERIOD_LSB_VAL_SHIFT; + al_reg_write32(&adapter->ec_regs_base->pth.clock_period_lsb, reg); + reg = clk_period >> 18; + al_reg_write32(&adapter->ec_regs_base->pth.clock_period_msb, reg); + + return 0; +} + +/* Configure the systime internal update */ +int al_eth_pth_int_update_config(struct al_hal_eth_adapter *adapter, + struct al_eth_pth_int_update_params *params) +{ + uint32_t reg; + + reg = al_reg_read32(&adapter->ec_regs_base->pth.int_update_ctrl); + if (params->enable == AL_FALSE) { + reg &= ~EC_PTH_INT_UPDATE_CTRL_INT_TRIG_EN; + } else { + reg |= EC_PTH_INT_UPDATE_CTRL_INT_TRIG_EN; + AL_REG_FIELD_SET(reg, EC_PTH_INT_UPDATE_CTRL_UPDATE_METHOD_MASK, + EC_PTH_INT_UPDATE_CTRL_UPDATE_METHOD_SHIFT, + params->method); + if (params->trigger == AL_ETH_PTH_INT_TRIG_REG_WRITE) + reg |= EC_PTH_INT_UPDATE_CTRL_UPDATE_TRIG; + else + reg &= ~EC_PTH_INT_UPDATE_CTRL_UPDATE_TRIG; + } + al_reg_write32(&adapter->ec_regs_base->pth.int_update_ctrl, reg); + return 0; +} +/* set internal update time */ +int al_eth_pth_int_update_time_set(struct al_hal_eth_adapter *adapter, + struct al_eth_pth_time *time) +{ + uint32_t reg; + + al_reg_write32(&adapter->ec_regs_base->pth.int_update_seconds, + time->seconds); + reg = time->femto & AL_BIT_MASK(18); + reg = reg << EC_PTH_INT_UPDATE_SUBSECONDS_LSB_VAL_SHIFT; + al_reg_write32(&adapter->ec_regs_base->pth.int_update_subseconds_lsb, + reg); + reg = time->femto >> 18; + al_reg_write32(&adapter->ec_regs_base->pth.int_update_subseconds_msb, + reg); + + return 0; +} + +/* Configure the systime external update */ +int al_eth_pth_ext_update_config(struct al_hal_eth_adapter *adapter, + struct al_eth_pth_ext_update_params * params) +{ + uint32_t reg; + + reg = al_reg_read32(&adapter->ec_regs_base->pth.int_update_ctrl); + AL_REG_FIELD_SET(reg, EC_PTH_INT_UPDATE_CTRL_UPDATE_METHOD_MASK, + EC_PTH_INT_UPDATE_CTRL_UPDATE_METHOD_SHIFT, + params->method); + + AL_REG_FIELD_SET(reg, EC_PTH_EXT_UPDATE_CTRL_EXT_TRIG_EN_MASK, + EC_PTH_EXT_UPDATE_CTRL_EXT_TRIG_EN_SHIFT, + params->triggers); + al_reg_write32(&adapter->ec_regs_base->pth.int_update_ctrl, reg); + return 0; +} + +/* set external update time */ +int al_eth_pth_ext_update_time_set(struct al_hal_eth_adapter *adapter, + struct al_eth_pth_time *time) +{ + uint32_t reg; + + al_reg_write32(&adapter->ec_regs_base->pth.ext_update_seconds, + time->seconds); + reg = time->femto & AL_BIT_MASK(18); + reg = reg << EC_PTH_EXT_UPDATE_SUBSECONDS_LSB_VAL_SHIFT; + al_reg_write32(&adapter->ec_regs_base->pth.ext_update_subseconds_lsb, + reg); + reg = time->femto >> 18; + al_reg_write32(&adapter->ec_regs_base->pth.ext_update_subseconds_msb, + reg); + + return 0; +}; + +/* set the read compensation delay */ +int al_eth_pth_read_compensation_set(struct al_hal_eth_adapter *adapter, + uint64_t subseconds) +{ + uint32_t reg; + + /* first write to lsb to ensure atomicity */ + reg = (subseconds & AL_BIT_MASK(18)) << EC_PTH_READ_COMPENSATION_SUBSECONDS_LSB_VAL_SHIFT; + al_reg_write32(&adapter->ec_regs_base->pth.read_compensation_subseconds_lsb, reg); + + reg = subseconds >> 18; + al_reg_write32(&adapter->ec_regs_base->pth.read_compensation_subseconds_msb, reg); + return 0; +} + +/* set the internal write compensation delay */ +int 
al_eth_pth_int_write_compensation_set(struct al_hal_eth_adapter *adapter, + uint64_t subseconds) +{ + uint32_t reg; + + /* first write to lsb to ensure atomicity */ + reg = (subseconds & AL_BIT_MASK(18)) << EC_PTH_INT_WRITE_COMPENSATION_SUBSECONDS_LSB_VAL_SHIFT; + al_reg_write32(&adapter->ec_regs_base->pth.int_write_compensation_subseconds_lsb, reg); + + reg = subseconds >> 18; + al_reg_write32(&adapter->ec_regs_base->pth.int_write_compensation_subseconds_msb, reg); + return 0; +} + +/* set the external write compensation delay */ +int al_eth_pth_ext_write_compensation_set(struct al_hal_eth_adapter *adapter, + uint64_t subseconds) +{ + uint32_t reg; + + /* first write to lsb to ensure atomicity */ + reg = (subseconds & AL_BIT_MASK(18)) << EC_PTH_EXT_WRITE_COMPENSATION_SUBSECONDS_LSB_VAL_SHIFT; + al_reg_write32(&adapter->ec_regs_base->pth.ext_write_compensation_subseconds_lsb, reg); + + reg = subseconds >> 18; + al_reg_write32(&adapter->ec_regs_base->pth.ext_write_compensation_subseconds_msb, reg); + return 0; +} + +/* set the sync compensation delay */ +int al_eth_pth_sync_compensation_set(struct al_hal_eth_adapter *adapter, + uint64_t subseconds) +{ + uint32_t reg; + + /* first write to lsb to ensure atomicity */ + reg = (subseconds & AL_BIT_MASK(18)) << EC_PTH_SYNC_COMPENSATION_SUBSECONDS_LSB_VAL_SHIFT; + al_reg_write32(&adapter->ec_regs_base->pth.sync_compensation_subseconds_lsb, reg); + + reg = subseconds >> 18; + al_reg_write32(&adapter->ec_regs_base->pth.sync_compensation_subseconds_msb, reg); + return 0; +} + +/* Configure an output pulse */ +int al_eth_pth_pulse_out_config(struct al_hal_eth_adapter *adapter, + struct al_eth_pth_pulse_out_params *params) +{ + uint32_t reg; + + if (params->index >= AL_ETH_PTH_PULSE_OUT_NUM) { + al_err("eth [%s] PTH out pulse index out of range\n", + adapter->name); + return -EINVAL; + } + reg = al_reg_read32(&adapter->ec_regs_base->pth_egress[params->index].trigger_ctrl); + if (params->enable == AL_FALSE) { + reg &= ~EC_PTH_EGRESS_TRIGGER_CTRL_EN; + } else { + reg |= EC_PTH_EGRESS_TRIGGER_CTRL_EN; + if (params->periodic == AL_FALSE) + reg &= ~EC_PTH_EGRESS_TRIGGER_CTRL_PERIODIC; + else + reg |= EC_PTH_EGRESS_TRIGGER_CTRL_PERIODIC; + + AL_REG_FIELD_SET(reg, EC_PTH_EGRESS_TRIGGER_CTRL_PERIOD_SUBSEC_MASK, + EC_PTH_EGRESS_TRIGGER_CTRL_PERIOD_SUBSEC_SHIFT, + params->period_us); + AL_REG_FIELD_SET(reg, EC_PTH_EGRESS_TRIGGER_CTRL_PERIOD_SEC_MASK, + EC_PTH_EGRESS_TRIGGER_CTRL_PERIOD_SEC_SHIFT, + params->period_sec); + } + al_reg_write32(&adapter->ec_regs_base->pth_egress[params->index].trigger_ctrl, reg); + + /* set trigger time */ + al_reg_write32(&adapter->ec_regs_base->pth_egress[params->index].trigger_seconds, + params->start_time.seconds); + reg = params->start_time.femto & AL_BIT_MASK(18); + reg = reg << EC_PTH_EGRESS_TRIGGER_SUBSECONDS_LSB_VAL_SHIFT; + al_reg_write32(&adapter->ec_regs_base->pth_egress[params->index].trigger_subseconds_lsb, + reg); + reg = params->start_time.femto >> 18; + al_reg_write32(&adapter->ec_regs_base->pth_egress[params->index].trigger_subseconds_msb, + reg); + + /* set pulse width */ + reg = params->pulse_width & AL_BIT_MASK(18); + reg = reg << EC_PTH_EGRESS_PULSE_WIDTH_SUBSECONDS_LSB_VAL_SHIFT; + al_reg_write32(&adapter->ec_regs_base->pth_egress[params->index].pulse_width_subseconds_lsb, reg); + + reg = params->pulse_width >> 18; + al_reg_write32(&adapter->ec_regs_base->pth_egress[params->index].pulse_width_subseconds_msb, reg); + + return 0; +} + +/** get link status */ +int al_eth_link_status_get(struct al_hal_eth_adapter 
*adapter, + struct al_eth_link_status *status) +{ + uint32_t reg; + + if (AL_ETH_IS_10G_MAC(adapter->mac_mode) || AL_ETH_IS_25G_MAC(adapter->mac_mode)) { + reg = al_reg_read32(&adapter->mac_regs_base->gen.mac_10g_stat); + + status->link_up = AL_TRUE; + + if (reg & (ETH_MAC_GEN_MAC_10G_STAT_LOC_FAULT | + ETH_MAC_GEN_MAC_10G_STAT_REM_FAULT)) + status->link_up = AL_FALSE; + + } else if (adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) { + al_reg_write32(&adapter->mac_regs_base->sgmii.reg_addr, 1); + /* + * This register is latched low so need to read twice to get + * the current link status + */ + reg = al_reg_read32(&adapter->mac_regs_base->sgmii.reg_data); + reg = al_reg_read32(&adapter->mac_regs_base->sgmii.reg_data); + + status->link_up = AL_FALSE; + + if (reg & AL_BIT(2)) + status->link_up = AL_TRUE; + + reg = al_reg_read32(&adapter->mac_regs_base->sgmii.link_stat); + + if ((reg & AL_BIT(3)) == 0) + status->link_up = AL_FALSE; + + } else if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) { + reg = al_reg_read32(&adapter->mac_regs_base->gen.rgmii_stat); + + status->link_up = AL_FALSE; + + if (reg & AL_BIT(4)) + status->link_up = AL_TRUE; + + } else if (adapter->mac_mode == AL_ETH_MAC_MODE_XLG_LL_40G) { + reg = al_reg_read32(&adapter->mac_regs_base->gen_v3.pcs_40g_ll_status); + + status->link_up = AL_FALSE; + + if ((reg & 0x1F) == 0x1F) + status->link_up = AL_TRUE; + + } else { + /* not implemented yet */ + return -EPERM; + } + + al_dbg("[%s]: mac %s port. link_status: %s.\n", adapter->name, + al_eth_mac_mode_str(adapter->mac_mode), + (status->link_up == AL_TRUE) ? "LINK_UP" : "LINK_DOWN"); + + return 0; +} + +/** set LED mode and value */ +int al_eth_led_set(struct al_hal_eth_adapter *adapter, al_bool link_is_up) +{ + uint32_t reg = 0; + uint32_t mode = ETH_MAC_GEN_LED_CFG_SEL_DEFAULT_REG; + + if (link_is_up) + mode = ETH_MAC_GEN_LED_CFG_SEL_LINK_ACTIVITY; + + AL_REG_FIELD_SET(reg, ETH_MAC_GEN_LED_CFG_SEL_MASK, + ETH_MAC_GEN_LED_CFG_SEL_SHIFT, mode); + + AL_REG_FIELD_SET(reg, ETH_MAC_GEN_LED_CFG_BLINK_TIMER_MASK, + ETH_MAC_GEN_LED_CFG_BLINK_TIMER_SHIFT, + ETH_MAC_GEN_LED_CFG_BLINK_TIMER_VAL); + + AL_REG_FIELD_SET(reg, ETH_MAC_GEN_LED_CFG_ACT_TIMER_MASK, + ETH_MAC_GEN_LED_CFG_ACT_TIMER_SHIFT, + ETH_MAC_GEN_LED_CFG_ACT_TIMER_VAL); + + al_reg_write32(&adapter->mac_regs_base->gen.led_cfg, reg); + + return 0; +} + +/* get statistics */ +int al_eth_mac_stats_get(struct al_hal_eth_adapter *adapter, struct al_eth_mac_stats *stats) +{ + al_assert(stats); + + if (AL_ETH_IS_1G_MAC(adapter->mac_mode)) { + void __iomem *mac_1g_regs_base = &adapter->mac_regs_base->mac_1g; + + stats->ifInUcastPkts = al_reg_read32(mac_1g_regs_base + 0x90); + stats->ifInMulticastPkts = al_reg_read32(mac_1g_regs_base + 0x94); + stats->ifInBroadcastPkts = al_reg_read32(mac_1g_regs_base + 0x98); + stats->etherStatsPkts = al_reg_read32(mac_1g_regs_base + 0xb4); + stats->ifOutUcastPkts = al_reg_read32(mac_1g_regs_base + 0xa0); + stats->ifOutMulticastPkts = al_reg_read32(mac_1g_regs_base + 0xa4); + stats->ifOutBroadcastPkts = al_reg_read32(mac_1g_regs_base + 0xa8); + stats->ifInErrors = al_reg_read32(mac_1g_regs_base + 0x88); + stats->ifOutErrors = al_reg_read32(mac_1g_regs_base + 0x8c); + + stats->aFramesReceivedOK = al_reg_read32(mac_1g_regs_base + 0x6c); + stats->aFramesTransmittedOK = al_reg_read32(mac_1g_regs_base + 0x68); + + stats->aOctetsReceivedOK = al_reg_read32(mac_1g_regs_base + 0x7c); + stats->aOctetsTransmittedOK = al_reg_read32(mac_1g_regs_base + 0x78); + + stats->etherStatsUndersizePkts = al_reg_read32(mac_1g_regs_base 
+ 0xB8); + stats->etherStatsFragments = al_reg_read32(mac_1g_regs_base + 0xE0); + stats->etherStatsJabbers = al_reg_read32(mac_1g_regs_base + 0xDC); + stats->etherStatsOversizePkts = al_reg_read32(mac_1g_regs_base + 0xBC); + + stats->aFrameCheckSequenceErrors = al_reg_read32(mac_1g_regs_base + 0x70); + stats->aAlignmentErrors = al_reg_read32(mac_1g_regs_base + 0x74); + stats->etherStatsDropEvents = al_reg_read32(mac_1g_regs_base + 0xAC); + + stats->aPAUSEMACCtrlFramesTransmitted = al_reg_read32(mac_1g_regs_base + 0x80); + stats->aPAUSEMACCtrlFramesReceived = al_reg_read32(mac_1g_regs_base + 0x84); + stats->aFrameTooLongErrors = 0; /* N/A */ + stats->aInRangeLengthErrors = 0; /* N/A */ + stats->VLANTransmittedOK = 0; /* N/A */ + stats->VLANReceivedOK = 0; /* N/A */ + stats->etherStatsOctets = al_reg_read32(mac_1g_regs_base + 0xB0); + + stats->etherStatsPkts64Octets = al_reg_read32(mac_1g_regs_base + 0xC0); + stats->etherStatsPkts65to127Octets = al_reg_read32(mac_1g_regs_base + 0xC4); + stats->etherStatsPkts128to255Octets = al_reg_read32(mac_1g_regs_base + 0xC8); + stats->etherStatsPkts256to511Octets = al_reg_read32(mac_1g_regs_base + 0xCC); + stats->etherStatsPkts512to1023Octets = al_reg_read32(mac_1g_regs_base + 0xD0); + stats->etherStatsPkts1024to1518Octets = al_reg_read32(mac_1g_regs_base + 0xD4); + stats->etherStatsPkts1519toX = al_reg_read32(mac_1g_regs_base + 0xD8); + } else if (AL_ETH_IS_10G_MAC(adapter->mac_mode) || AL_ETH_IS_25G_MAC(adapter->mac_mode)) { + if (adapter->rev_id < AL_ETH_REV_ID_3) { + void __iomem *mac_10g_regs_base = &adapter->mac_regs_base->mac_10g; + uint64_t octets; + + stats->ifInUcastPkts = al_reg_read32(mac_10g_regs_base + 0xE0); + stats->ifInMulticastPkts = al_reg_read32(mac_10g_regs_base + 0xE8); + stats->ifInBroadcastPkts = al_reg_read32(mac_10g_regs_base + 0xF0); + stats->etherStatsPkts = al_reg_read32(mac_10g_regs_base + 0x130); + stats->ifOutUcastPkts = al_reg_read32(mac_10g_regs_base + 0x108); + stats->ifOutMulticastPkts = al_reg_read32(mac_10g_regs_base + 0x110); + stats->ifOutBroadcastPkts = al_reg_read32(mac_10g_regs_base + 0x118); + stats->ifInErrors = al_reg_read32(mac_10g_regs_base + 0x190); + stats->ifOutErrors = al_reg_read32(mac_10g_regs_base + 0xf8); + + stats->aFramesReceivedOK = al_reg_read32(mac_10g_regs_base + 0x88); + stats->aFramesTransmittedOK = al_reg_read32(mac_10g_regs_base + 0x80); + /* aOctetsReceivedOK = ifInOctets - 18 * aFramesReceivedOK - 4 * VLANReceivedOK */ + octets = al_reg_read32(mac_10g_regs_base + 0xD8); + octets |= (uint64_t)(al_reg_read32(mac_10g_regs_base + 0xDC)) << 32; + octets -= 18 * stats->aFramesReceivedOK; + octets -= 4 * al_reg_read32(mac_10g_regs_base + 0xC8); + stats->aOctetsReceivedOK = octets; + + /* aOctetsTransmittedOK = ifOutOctets - 18 * aFramesTransmittedOK - 4 * VLANTransmittedOK */ + octets = al_reg_read32(mac_10g_regs_base + 0xD0); + octets |= (uint64_t)(al_reg_read32(mac_10g_regs_base + 0xD4)) << 32; + octets -= 18 * stats->aFramesTransmittedOK; + octets -= 4 * al_reg_read32(mac_10g_regs_base + 0xC0); + stats->aOctetsTransmittedOK = octets; + + stats->etherStatsUndersizePkts = al_reg_read32(mac_10g_regs_base + 0x138); + stats->etherStatsFragments = al_reg_read32(mac_10g_regs_base + 0x188); + stats->etherStatsJabbers = al_reg_read32(mac_10g_regs_base + 0x180); + stats->etherStatsOversizePkts = al_reg_read32(mac_10g_regs_base + 0x178); + + stats->aFrameCheckSequenceErrors = al_reg_read32(mac_10g_regs_base + 0x90); + stats->aAlignmentErrors = al_reg_read32(mac_10g_regs_base + 0x98); + 
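+			/*
+			 * In the aOctets* derivations above, 18 is the
+			 * per-frame overhead excluded from the MIB octet
+			 * counters (6 + 6 address bytes + 2 type/length +
+			 * 4 FCS), plus 4 more bytes per VLAN-tagged frame;
+			 * e.g. one untagged 64-byte frame adds 64 to
+			 * ifInOctets but only 46 to aOctetsReceivedOK.
+			 */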
stats->etherStatsDropEvents = al_reg_read32(mac_10g_regs_base + 0x120); + + stats->aPAUSEMACCtrlFramesTransmitted = al_reg_read32(mac_10g_regs_base + 0xA0); + stats->aPAUSEMACCtrlFramesReceived = al_reg_read32(mac_10g_regs_base + 0xA8); + stats->aFrameTooLongErrors = al_reg_read32(mac_10g_regs_base + 0xB0); + stats->aInRangeLengthErrors = al_reg_read32(mac_10g_regs_base + 0xB8); + stats->VLANTransmittedOK = al_reg_read32(mac_10g_regs_base + 0xC0); + stats->VLANReceivedOK = al_reg_read32(mac_10g_regs_base + 0xC8); + stats->etherStatsOctets = al_reg_read32(mac_10g_regs_base + 0x128); + + stats->etherStatsPkts64Octets = al_reg_read32(mac_10g_regs_base + 0x140); + stats->etherStatsPkts65to127Octets = al_reg_read32(mac_10g_regs_base + 0x148); + stats->etherStatsPkts128to255Octets = al_reg_read32(mac_10g_regs_base + 0x150); + stats->etherStatsPkts256to511Octets = al_reg_read32(mac_10g_regs_base + 0x158); + stats->etherStatsPkts512to1023Octets = al_reg_read32(mac_10g_regs_base + 0x160); + stats->etherStatsPkts1024to1518Octets = al_reg_read32(mac_10g_regs_base + 0x168); + stats->etherStatsPkts1519toX = al_reg_read32(mac_10g_regs_base + 0x170); + } else { + void __iomem *mac_10g_regs_base = &adapter->mac_regs_base->mac_10g; + uint64_t octets; + /* TODO - change to 64 bit */ + stats->ifInUcastPkts = al_reg_read32(mac_10g_regs_base + 0x140); + stats->ifInMulticastPkts = al_reg_read32(mac_10g_regs_base + 0x148); + stats->ifInBroadcastPkts = al_reg_read32(mac_10g_regs_base + 0x150); + stats->etherStatsPkts = al_reg_read32(mac_10g_regs_base + 0x160); + stats->ifOutUcastPkts = al_reg_read32(mac_10g_regs_base + 0x240); + stats->ifOutMulticastPkts = al_reg_read32(mac_10g_regs_base + 0x248); + stats->ifOutBroadcastPkts = al_reg_read32(mac_10g_regs_base + 0x250); + stats->ifInErrors = al_reg_read32(mac_10g_regs_base + 0x138); + stats->ifOutErrors = al_reg_read32(mac_10g_regs_base + 0x238); + + stats->aFramesReceivedOK = al_reg_read32(mac_10g_regs_base + 0x120); /*frames_ok*/ + stats->aFramesTransmittedOK = al_reg_read32(mac_10g_regs_base + 0x220); /*frames_ok*/ + /* aOctetsReceivedOK = ifInOctets - 18 * aFramesReceivedOK - 4 * VLANReceivedOK */ + octets = al_reg_read32(mac_10g_regs_base + 0x108); /*OctetsOK*/ + octets |= (uint64_t)(al_reg_read32(mac_10g_regs_base + 0x10C)) << 32; + octets -= 18 * stats->aFramesReceivedOK; + octets -= 4 * al_reg_read32(mac_10g_regs_base + 0x130); /*VLANOK*/ + stats->aOctetsReceivedOK = octets; + + /* aOctetsTransmittedOK = ifOutOctets - 18 * aFramesTransmittedOK - 4 * VLANTransmittedOK */ + octets = al_reg_read32(mac_10g_regs_base + 0x208); /*OctetsOK*/ + octets |= (uint64_t)(al_reg_read32(mac_10g_regs_base + 0x20c)) << 32; + octets -= 18 * stats->aFramesTransmittedOK; + octets -= 4 * al_reg_read32(mac_10g_regs_base + 0x230); /*VLANOK*/ + stats->aOctetsTransmittedOK = octets; + + stats->etherStatsUndersizePkts = al_reg_read32(mac_10g_regs_base + 0x168); + stats->etherStatsFragments = al_reg_read32(mac_10g_regs_base + 0x1b8); + stats->etherStatsJabbers = al_reg_read32(mac_10g_regs_base + 0x1b0); + stats->etherStatsOversizePkts = al_reg_read32(mac_10g_regs_base + 0x1a8); + + stats->aFrameCheckSequenceErrors = al_reg_read32(mac_10g_regs_base + 0x128); /* CRCErrors */ + /* stats->aAlignmentErrors = al_reg_read32(mac_10g_regs_base + 0x98); */ /* not implemented */ + stats->etherStatsDropEvents = al_reg_read32(mac_10g_regs_base + 0x158); + } + } else { + uint64_t octets; + /* TODO - change to 64 bit */ + stats->ifInUcastPkts = al_eth_40g_mac_reg_read(adapter, 0x140); + 
stats->ifInMulticastPkts = al_eth_40g_mac_reg_read(adapter, 0x148);
+			stats->ifInBroadcastPkts = al_eth_40g_mac_reg_read(adapter, 0x150);
+			stats->etherStatsPkts = al_eth_40g_mac_reg_read(adapter, 0x160);
+			stats->ifOutUcastPkts = al_eth_40g_mac_reg_read(adapter, 0x240);
+			stats->ifOutMulticastPkts = al_eth_40g_mac_reg_read(adapter, 0x248);
+			stats->ifOutBroadcastPkts = al_eth_40g_mac_reg_read(adapter, 0x250);
+			stats->ifInErrors = al_eth_40g_mac_reg_read(adapter, 0x138);
+			stats->ifOutErrors = al_eth_40g_mac_reg_read(adapter, 0x238);
+			stats->aFramesReceivedOK = al_eth_40g_mac_reg_read(adapter, 0x120);
+			stats->aFramesTransmittedOK = al_eth_40g_mac_reg_read(adapter, 0x220);
+
+			/* aOctetsReceivedOK = ifInOctets - 18 * aFramesReceivedOK - 4 * VLANReceivedOK */
+			octets = al_eth_40g_mac_reg_read(adapter, 0x100);
+			octets |= (uint64_t)(al_eth_40g_mac_reg_read(adapter, 0x104)) << 32;
+			octets -= 18 * stats->aFramesReceivedOK;
+			octets -= 4 * al_eth_40g_mac_reg_read(adapter, 0x130); /*VLANOK*/
+			stats->aOctetsReceivedOK = octets;
+
+			/* aOctetsTransmittedOK = ifOutOctets - 18 * aFramesTransmittedOK - 4 * VLANTransmittedOK */
+			octets = al_eth_40g_mac_reg_read(adapter, 0x200);
+			octets |= (uint64_t)(al_eth_40g_mac_reg_read(adapter, 0x204)) << 32;
+			octets -= 18 * stats->aFramesTransmittedOK;
+			octets -= 4 * al_eth_40g_mac_reg_read(adapter, 0x230); /*VLANOK*/
+			stats->aOctetsTransmittedOK = octets;
+
+			stats->etherStatsUndersizePkts = al_eth_40g_mac_reg_read(adapter, 0x168);
+			stats->etherStatsFragments = al_eth_40g_mac_reg_read(adapter, 0x1b8);
+			stats->etherStatsJabbers = al_eth_40g_mac_reg_read(adapter, 0x1b0);
+			stats->etherStatsOversizePkts = al_eth_40g_mac_reg_read(adapter, 0x1a8);
+			stats->aFrameCheckSequenceErrors = al_eth_40g_mac_reg_read(adapter, 0x128);
+			stats->aAlignmentErrors = al_eth_40g_mac_reg_read(adapter, 0x110);
+			stats->etherStatsDropEvents = al_eth_40g_mac_reg_read(adapter, 0x158);
+	}
+
+	stats->eee_in = al_reg_read32(&adapter->mac_regs_base->stat.eee_in);
+	stats->eee_out = al_reg_read32(&adapter->mac_regs_base->stat.eee_out);
+
+	return 0;
+}
+
+/**
+ * read ec_stat_counters
+ */
+int al_eth_ec_stats_get(struct al_hal_eth_adapter *adapter, struct al_eth_ec_stats *stats)
+{
+	al_assert(stats);
+	stats->faf_in_rx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.faf_in_rx_pkt);
+	stats->faf_in_rx_short = al_reg_read32(&adapter->ec_regs_base->stat.faf_in_rx_short);
+	stats->faf_in_rx_long = al_reg_read32(&adapter->ec_regs_base->stat.faf_in_rx_long);
+	stats->faf_out_rx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.faf_out_rx_pkt);
+	stats->faf_out_rx_short = al_reg_read32(&adapter->ec_regs_base->stat.faf_out_rx_short);
+	stats->faf_out_rx_long = al_reg_read32(&adapter->ec_regs_base->stat.faf_out_rx_long);
+	stats->faf_out_drop = al_reg_read32(&adapter->ec_regs_base->stat.faf_out_drop);
+	stats->rxf_in_rx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.rxf_in_rx_pkt);
+	stats->rxf_in_fifo_err = al_reg_read32(&adapter->ec_regs_base->stat.rxf_in_fifo_err);
+	stats->lbf_in_rx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.lbf_in_rx_pkt);
+	stats->lbf_in_fifo_err = al_reg_read32(&adapter->ec_regs_base->stat.lbf_in_fifo_err);
+	stats->rxf_out_rx_1_pkt = al_reg_read32(&adapter->ec_regs_base->stat.rxf_out_rx_1_pkt);
+	stats->rxf_out_rx_2_pkt = al_reg_read32(&adapter->ec_regs_base->stat.rxf_out_rx_2_pkt);
+	stats->rxf_out_drop_1_pkt = al_reg_read32(&adapter->ec_regs_base->stat.rxf_out_drop_1_pkt);
+	stats->rxf_out_drop_2_pkt =
al_reg_read32(&adapter->ec_regs_base->stat.rxf_out_drop_2_pkt); + stats->rpe_1_in_rx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.rpe_1_in_rx_pkt); + stats->rpe_1_out_rx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.rpe_1_out_rx_pkt); + stats->rpe_2_in_rx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.rpe_2_in_rx_pkt); + stats->rpe_2_out_rx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.rpe_2_out_rx_pkt); + stats->rpe_3_in_rx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.rpe_3_in_rx_pkt); + stats->rpe_3_out_rx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.rpe_3_out_rx_pkt); + stats->tpe_in_tx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.tpe_in_tx_pkt); + stats->tpe_out_tx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.tpe_out_tx_pkt); + stats->tpm_tx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.tpm_tx_pkt); + stats->tfw_in_tx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.tfw_in_tx_pkt); + stats->tfw_out_tx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.tfw_out_tx_pkt); + stats->rfw_in_rx_pkt = al_reg_read32(&adapter->ec_regs_base->stat.rfw_in_rx_pkt); + stats->rfw_in_vlan_drop = al_reg_read32(&adapter->ec_regs_base->stat.rfw_in_vlan_drop); + stats->rfw_in_parse_drop = al_reg_read32(&adapter->ec_regs_base->stat.rfw_in_parse_drop); + stats->rfw_in_mc = al_reg_read32(&adapter->ec_regs_base->stat.rfw_in_mc); + stats->rfw_in_bc = al_reg_read32(&adapter->ec_regs_base->stat.rfw_in_bc); + stats->rfw_in_vlan_exist = al_reg_read32(&adapter->ec_regs_base->stat.rfw_in_vlan_exist); + stats->rfw_in_vlan_nexist = al_reg_read32(&adapter->ec_regs_base->stat.rfw_in_vlan_nexist); + stats->rfw_in_mac_drop = al_reg_read32(&adapter->ec_regs_base->stat.rfw_in_mac_drop); + stats->rfw_in_mac_ndet_drop = al_reg_read32(&adapter->ec_regs_base->stat.rfw_in_mac_ndet_drop); + stats->rfw_in_ctrl_drop = al_reg_read32(&adapter->ec_regs_base->stat.rfw_in_ctrl_drop); + stats->rfw_in_prot_i_drop = al_reg_read32(&adapter->ec_regs_base->stat.rfw_in_prot_i_drop); + stats->eee_in = al_reg_read32(&adapter->ec_regs_base->stat.eee_in); + return 0; +} + +/** + * read per_udma_counters + */ +int al_eth_ec_stat_udma_get(struct al_hal_eth_adapter *adapter, uint8_t idx, struct al_eth_ec_stat_udma *stats) +{ + + al_assert(idx <= 3); /*valid udma_id*/ + al_assert(stats); + stats->rfw_out_rx_pkt = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].rfw_out_rx_pkt); + stats->rfw_out_drop = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].rfw_out_drop); + stats->msw_in_rx_pkt = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].msw_in_rx_pkt); + stats->msw_drop_q_full = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].msw_drop_q_full); + stats->msw_drop_sop = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].msw_drop_sop); + stats->msw_drop_eop = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].msw_drop_eop); + stats->msw_wr_eop = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].msw_wr_eop); + stats->msw_out_rx_pkt = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].msw_out_rx_pkt); + stats->tso_no_tso_pkt = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tso_no_tso_pkt); + stats->tso_tso_pkt = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tso_tso_pkt); + stats->tso_seg_pkt = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tso_seg_pkt); + stats->tso_pad_pkt = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tso_pad_pkt); + stats->tpm_tx_spoof = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tpm_tx_spoof); + stats->tmi_in_tx_pkt = 
al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tmi_in_tx_pkt);
+	stats->tmi_out_to_mac = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tmi_out_to_mac);
+	stats->tmi_out_to_rx = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tmi_out_to_rx);
+	stats->tx_q0_bytes = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tx_q0_bytes);
+	stats->tx_q1_bytes = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tx_q1_bytes);
+	stats->tx_q2_bytes = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tx_q2_bytes);
+	stats->tx_q3_bytes = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tx_q3_bytes);
+	stats->tx_q0_pkts = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tx_q0_pkts);
+	stats->tx_q1_pkts = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tx_q1_pkts);
+	stats->tx_q2_pkts = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tx_q2_pkts);
+	stats->tx_q3_pkts = al_reg_read32(&adapter->ec_regs_base->stat_udma[idx].tx_q3_pkts);
+	return 0;
+}
+
+/* Traffic control */
+
+
+int al_eth_flr_rmn(int (* pci_read_config_u32)(void *handle, int where, uint32_t *val),
+		   int (* pci_write_config_u32)(void *handle, int where, uint32_t val),
+		   void *handle,
+		   void __iomem *mac_base)
+{
+	struct al_eth_mac_regs __iomem *mac_regs_base =
+		(struct	al_eth_mac_regs __iomem *)mac_base;
+	uint32_t cfg_reg_store[6];
+	uint32_t reg;
+	uint32_t mux_sel;
+	int i = 0;
+
+	(*pci_read_config_u32)(handle, AL_ADAPTER_GENERIC_CONTROL_0, &reg);
+
+	/* reset 1G mac */
+	AL_REG_MASK_SET(reg, AL_ADAPTER_GENERIC_CONTROL_0_ETH_RESET_1GMAC);
+	(*pci_write_config_u32)(handle, AL_ADAPTER_GENERIC_CONTROL_0, reg);
+	al_udelay(1000);
+	/* don't reset 1G mac */
+	AL_REG_MASK_CLEAR(reg, AL_ADAPTER_GENERIC_CONTROL_0_ETH_RESET_1GMAC);
+	/* prevent 1G mac reset on FLR */
+	AL_REG_MASK_CLEAR(reg, AL_ADAPTER_GENERIC_CONTROL_0_ETH_RESET_1GMAC_ON_FLR);
+	/* prevent adapter reset */
+	(*pci_write_config_u32)(handle, AL_ADAPTER_GENERIC_CONTROL_0, reg);
+
+	mux_sel = al_reg_read32(&mac_regs_base->gen.mux_sel);
+
+	/* save the PCI registers that get reset due to the FLR */
+	(*pci_read_config_u32)(handle, AL_PCI_COMMAND, &cfg_reg_store[i++]);
+	(*pci_read_config_u32)(handle, 0xC, &cfg_reg_store[i++]);
+	(*pci_read_config_u32)(handle, 0x10, &cfg_reg_store[i++]);
+	(*pci_read_config_u32)(handle, 0x18, &cfg_reg_store[i++]);
+	(*pci_read_config_u32)(handle, 0x20, &cfg_reg_store[i++]);
+	(*pci_read_config_u32)(handle, 0x110, &cfg_reg_store[i++]);
+
+	/* do the FLR */
+	(*pci_write_config_u32)(handle, AL_PCI_EXP_CAP_BASE + AL_PCI_EXP_DEVCTL, AL_PCI_EXP_DEVCTL_BCR_FLR);
+	al_udelay(1000);
+	/* restore the saved registers */
+	i = 0;
+	(*pci_write_config_u32)(handle, AL_PCI_COMMAND, cfg_reg_store[i++]);
+	(*pci_write_config_u32)(handle, 0xC, cfg_reg_store[i++]);
+	(*pci_write_config_u32)(handle, 0x10, cfg_reg_store[i++]);
+	(*pci_write_config_u32)(handle, 0x18, cfg_reg_store[i++]);
+	(*pci_write_config_u32)(handle, 0x20, cfg_reg_store[i++]);
+	(*pci_write_config_u32)(handle, 0x110, cfg_reg_store[i++]);
+
+	al_reg_write32_masked(&mac_regs_base->gen.mux_sel, ETH_MAC_GEN_MUX_SEL_KR_IN_MASK, mux_sel);
+
+	/* set SGMII clock to 125MHz */
+	al_reg_write32(mac_base + 0xB08, 0x03320501);
+
+	/* reset 1G mac */
+	AL_REG_MASK_SET(reg, AL_ADAPTER_GENERIC_CONTROL_0_ETH_RESET_1GMAC);
+	(*pci_write_config_u32)(handle, AL_ADAPTER_GENERIC_CONTROL_0, reg);
+
+	al_udelay(1000);
+
+	/* clear 1G mac reset */
+	AL_REG_MASK_CLEAR(reg, AL_ADAPTER_GENERIC_CONTROL_0_ETH_RESET_1GMAC);
+	(*pci_write_config_u32)(handle, AL_ADAPTER_GENERIC_CONTROL_0, reg);
+
+	/* reset SGMII mac clock to default */
+	al_reg_write32(mac_base + 0xB08, 0x00320501);
+	al_udelay(1000);
+	/* reset async fifo */
+	reg = al_reg_read32(mac_base + 0x95c);
+	AL_REG_MASK_SET(reg, 0xF0);
+	al_reg_write32(mac_base + 0x95c, reg);
+	reg = al_reg_read32(mac_base + 0x95c);
+	AL_REG_MASK_CLEAR(reg, 0xF0);
+	al_reg_write32(mac_base + 0x95c, reg);
+
+	return 0;
+}
+
+int al_eth_flr_rmn_restore_params(int (* pci_read_config_u32)(void *handle, int where, uint32_t *val),
+		int (* pci_write_config_u32)(void *handle, int where, uint32_t val),
+		void *handle,
+		void __iomem	*mac_base,
+		void __iomem	*ec_base,
+		int	mac_addresses_num
+		)
+{
+	struct al_eth_board_params params = { .media_type = 0 };
+	uint8_t mac_addr[6];
+	int rc;
+
+	/* not implemented yet */
+	if (mac_addresses_num > 1)
+		return -EPERM;
+
+	/* save the board params so we can restore them after the reset */
+	al_eth_board_params_get(mac_base, &params);
+	al_eth_mac_addr_read(ec_base, 0, mac_addr);
+
+	rc = al_eth_flr_rmn(pci_read_config_u32, pci_write_config_u32, handle, mac_base);
+	al_eth_board_params_set(mac_base, &params);
+	al_eth_mac_addr_store(ec_base, 0, mac_addr);
+
+	return rc;
+}
+
+/* board params register 1 */
+#define AL_HAL_ETH_MEDIA_TYPE_MASK	(AL_FIELD_MASK(3, 0))
+#define AL_HAL_ETH_MEDIA_TYPE_SHIFT	0
+#define AL_HAL_ETH_EXT_PHY_SHIFT	4
+#define AL_HAL_ETH_PHY_ADDR_MASK	(AL_FIELD_MASK(9, 5))
+#define AL_HAL_ETH_PHY_ADDR_SHIFT	5
+#define AL_HAL_ETH_SFP_EXIST_SHIFT	10
+#define AL_HAL_ETH_AN_ENABLE_SHIFT	11
+#define AL_HAL_ETH_KR_LT_ENABLE_SHIFT	12
+#define AL_HAL_ETH_KR_FEC_ENABLE_SHIFT	13
+#define AL_HAL_ETH_MDIO_FREQ_MASK	(AL_FIELD_MASK(15, 14))
+#define AL_HAL_ETH_MDIO_FREQ_SHIFT	14
+#define AL_HAL_ETH_I2C_ADAPTER_ID_MASK	(AL_FIELD_MASK(19, 16))
+#define AL_HAL_ETH_I2C_ADAPTER_ID_SHIFT	16
+#define AL_HAL_ETH_EXT_PHY_IF_MASK	(AL_FIELD_MASK(21, 20))
+#define AL_HAL_ETH_EXT_PHY_IF_SHIFT	20
+#define AL_HAL_ETH_AUTO_NEG_MODE_SHIFT	22
+#define AL_HAL_ETH_SERDES_GRP_MASK	(AL_FIELD_MASK(26, 25))
+#define AL_HAL_ETH_SERDES_GRP_SHIFT	25
+#define AL_HAL_ETH_SERDES_LANE_MASK	(AL_FIELD_MASK(28, 27))
+#define AL_HAL_ETH_SERDES_LANE_SHIFT	27
+#define AL_HAL_ETH_REF_CLK_FREQ_MASK	(AL_FIELD_MASK(31, 29))
+#define AL_HAL_ETH_REF_CLK_FREQ_SHIFT	29
+
+/* board params register 2 */
+#define AL_HAL_ETH_DONT_OVERRIDE_SERDES_SHIFT	0
+#define AL_HAL_ETH_1000_BASE_X_SHIFT		1
+#define AL_HAL_ETH_1G_AN_DISABLE_SHIFT		2
+#define AL_HAL_ETH_1G_SPEED_MASK		(AL_FIELD_MASK(4, 3))
+#define AL_HAL_ETH_1G_SPEED_SHIFT		3
+#define AL_HAL_ETH_1G_HALF_DUPLEX_SHIFT		5
+#define AL_HAL_ETH_1G_FC_DISABLE_SHIFT		6
+#define AL_HAL_ETH_RETIMER_EXIST_SHIFT		7
+#define AL_HAL_ETH_RETIMER_BUS_ID_MASK		(AL_FIELD_MASK(11, 8))
+#define AL_HAL_ETH_RETIMER_BUS_ID_SHIFT		8
+#define AL_HAL_ETH_RETIMER_I2C_ADDR_MASK	(AL_FIELD_MASK(18, 12))
+#define AL_HAL_ETH_RETIMER_I2C_ADDR_SHIFT	12
+#define AL_HAL_ETH_RETIMER_CHANNEL_SHIFT	19
+#define AL_HAL_ETH_DAC_LENGTH_MASK		(AL_FIELD_MASK(23, 20))
+#define AL_HAL_ETH_DAC_LENGTH_SHIFT		20
+#define AL_HAL_ETH_DAC_SHIFT			24
+
+int al_eth_board_params_set(void * __iomem mac_base, struct al_eth_board_params *params){
+	uint32_t	reg = 0;
+
+	/* ************* Setting Board params register 1 **************** */
+	AL_REG_FIELD_SET(reg, AL_HAL_ETH_MEDIA_TYPE_MASK,
+			 AL_HAL_ETH_MEDIA_TYPE_SHIFT, params->media_type);
+	AL_REG_BIT_VAL_SET(reg, AL_HAL_ETH_EXT_PHY_SHIFT, params->phy_exist == AL_TRUE);
+	AL_REG_FIELD_SET(reg, AL_HAL_ETH_PHY_ADDR_MASK,
+			 AL_HAL_ETH_PHY_ADDR_SHIFT, params->phy_mdio_addr);
+
+	AL_REG_BIT_VAL_SET(reg, AL_HAL_ETH_SFP_EXIST_SHIFT, params->sfp_plus_module_exist
== AL_TRUE); + + AL_REG_BIT_VAL_SET(reg, AL_HAL_ETH_AN_ENABLE_SHIFT, params->autoneg_enable == AL_TRUE); + AL_REG_BIT_VAL_SET(reg, AL_HAL_ETH_KR_LT_ENABLE_SHIFT, params->kr_lt_enable == AL_TRUE); + AL_REG_BIT_VAL_SET(reg, AL_HAL_ETH_KR_FEC_ENABLE_SHIFT, params->kr_fec_enable == AL_TRUE); + AL_REG_FIELD_SET(reg, AL_HAL_ETH_MDIO_FREQ_MASK, + AL_HAL_ETH_MDIO_FREQ_SHIFT, params->mdio_freq); + AL_REG_FIELD_SET(reg, AL_HAL_ETH_I2C_ADAPTER_ID_MASK, + AL_HAL_ETH_I2C_ADAPTER_ID_SHIFT, params->i2c_adapter_id); + AL_REG_FIELD_SET(reg, AL_HAL_ETH_EXT_PHY_IF_MASK, + AL_HAL_ETH_EXT_PHY_IF_SHIFT, params->phy_if); + + AL_REG_BIT_VAL_SET(reg, AL_HAL_ETH_AUTO_NEG_MODE_SHIFT, + params->an_mode == AL_ETH_BOARD_AUTONEG_IN_BAND); + + AL_REG_FIELD_SET(reg, AL_HAL_ETH_SERDES_GRP_MASK, + AL_HAL_ETH_SERDES_GRP_SHIFT, params->serdes_grp); + AL_REG_FIELD_SET(reg, AL_HAL_ETH_SERDES_LANE_MASK, + AL_HAL_ETH_SERDES_LANE_SHIFT, params->serdes_lane); + + AL_REG_FIELD_SET(reg, AL_HAL_ETH_REF_CLK_FREQ_MASK, + AL_HAL_ETH_REF_CLK_FREQ_SHIFT, params->ref_clk_freq); + + al_assert(reg != 0); + + al_reg_write32(mac_base + 0x4, reg); + + /* ************* Setting Board params register 2 **************** */ + reg = 0; + AL_REG_BIT_VAL_SET(reg, AL_HAL_ETH_DONT_OVERRIDE_SERDES_SHIFT, + params->dont_override_serdes == AL_TRUE); + + AL_REG_BIT_VAL_SET(reg, AL_HAL_ETH_1000_BASE_X_SHIFT, + params->force_1000_base_x == AL_TRUE); + + AL_REG_BIT_VAL_SET(reg, AL_HAL_ETH_1G_AN_DISABLE_SHIFT, + params->an_disable == AL_TRUE); + + AL_REG_FIELD_SET(reg, AL_HAL_ETH_1G_SPEED_MASK, + AL_HAL_ETH_1G_SPEED_SHIFT, params->speed); + + AL_REG_BIT_VAL_SET(reg, AL_HAL_ETH_1G_HALF_DUPLEX_SHIFT, + params->half_duplex == AL_TRUE); + + AL_REG_BIT_VAL_SET(reg, AL_HAL_ETH_1G_FC_DISABLE_SHIFT, + params->fc_disable == AL_TRUE); + + AL_REG_BIT_VAL_SET(reg, AL_HAL_ETH_RETIMER_EXIST_SHIFT, params->retimer_exist == AL_TRUE); + AL_REG_FIELD_SET(reg, AL_HAL_ETH_RETIMER_BUS_ID_MASK, + AL_HAL_ETH_RETIMER_BUS_ID_SHIFT, params->retimer_bus_id); + AL_REG_FIELD_SET(reg, AL_HAL_ETH_RETIMER_I2C_ADDR_MASK, + AL_HAL_ETH_RETIMER_I2C_ADDR_SHIFT, params->retimer_i2c_addr); + AL_REG_BIT_VAL_SET(reg, AL_HAL_ETH_RETIMER_CHANNEL_SHIFT, params->retimer_channel); + + AL_REG_FIELD_SET(reg, AL_HAL_ETH_DAC_LENGTH_MASK, + AL_HAL_ETH_DAC_LENGTH_SHIFT, params->dac_len); + AL_REG_BIT_VAL_SET(reg, AL_HAL_ETH_DAC_SHIFT, params->dac); + + al_reg_write32(mac_base + 0x404, reg); + return 0; +} + +int al_eth_board_params_get(void * __iomem mac_base, struct al_eth_board_params *params){ + uint32_t reg = al_reg_read32(mac_base + 0x4); + + /* check if the register was initialized, 0 is not a valid value */ + if (reg == 0) + return -ENOENT; + + /* ************* Getting Board params register 1 **************** */ + params->media_type = AL_REG_FIELD_GET(reg, AL_HAL_ETH_MEDIA_TYPE_MASK, + AL_HAL_ETH_MEDIA_TYPE_SHIFT); + if (AL_REG_BIT_GET(reg, AL_HAL_ETH_EXT_PHY_SHIFT)) + params->phy_exist = AL_TRUE; + else + params->phy_exist = AL_FALSE; + + params->phy_mdio_addr = AL_REG_FIELD_GET(reg, AL_HAL_ETH_PHY_ADDR_MASK, + AL_HAL_ETH_PHY_ADDR_SHIFT); + + if (AL_REG_BIT_GET(reg, AL_HAL_ETH_SFP_EXIST_SHIFT)) + params->sfp_plus_module_exist = AL_TRUE; + else + params->sfp_plus_module_exist = AL_FALSE; + + if (AL_REG_BIT_GET(reg, AL_HAL_ETH_AN_ENABLE_SHIFT)) + params->autoneg_enable = AL_TRUE; + else + params->autoneg_enable = AL_FALSE; + + if (AL_REG_BIT_GET(reg, AL_HAL_ETH_KR_LT_ENABLE_SHIFT)) + params->kr_lt_enable = AL_TRUE; + else + params->kr_lt_enable = AL_FALSE; + + if (AL_REG_BIT_GET(reg, 
AL_HAL_ETH_KR_FEC_ENABLE_SHIFT))
+		params->kr_fec_enable = AL_TRUE;
+	else
+		params->kr_fec_enable = AL_FALSE;
+
+	params->mdio_freq = AL_REG_FIELD_GET(reg,
+			AL_HAL_ETH_MDIO_FREQ_MASK,
+			AL_HAL_ETH_MDIO_FREQ_SHIFT);
+
+	params->i2c_adapter_id = AL_REG_FIELD_GET(reg,
+			AL_HAL_ETH_I2C_ADAPTER_ID_MASK,
+			AL_HAL_ETH_I2C_ADAPTER_ID_SHIFT);
+
+	params->phy_if = AL_REG_FIELD_GET(reg,
+			AL_HAL_ETH_EXT_PHY_IF_MASK,
+			AL_HAL_ETH_EXT_PHY_IF_SHIFT);
+
+	if (AL_REG_BIT_GET(reg, AL_HAL_ETH_AUTO_NEG_MODE_SHIFT))
+		params->an_mode = AL_TRUE;
+	else
+		params->an_mode = AL_FALSE;
+
+	params->serdes_grp = AL_REG_FIELD_GET(reg,
+			AL_HAL_ETH_SERDES_GRP_MASK,
+			AL_HAL_ETH_SERDES_GRP_SHIFT);
+
+	params->serdes_lane = AL_REG_FIELD_GET(reg,
+			AL_HAL_ETH_SERDES_LANE_MASK,
+			AL_HAL_ETH_SERDES_LANE_SHIFT);
+
+	params->ref_clk_freq = AL_REG_FIELD_GET(reg,
+			AL_HAL_ETH_REF_CLK_FREQ_MASK,
+			AL_HAL_ETH_REF_CLK_FREQ_SHIFT);
+
+	/* ************* Getting Board params register 2 **************** */
+	reg = al_reg_read32(mac_base + 0x404);
+	if (AL_REG_BIT_GET(reg, AL_HAL_ETH_DONT_OVERRIDE_SERDES_SHIFT))
+		params->dont_override_serdes = AL_TRUE;
+	else
+		params->dont_override_serdes = AL_FALSE;
+
+	if (AL_REG_BIT_GET(reg, AL_HAL_ETH_1000_BASE_X_SHIFT))
+		params->force_1000_base_x = AL_TRUE;
+	else
+		params->force_1000_base_x = AL_FALSE;
+
+	if (AL_REG_BIT_GET(reg, AL_HAL_ETH_1G_AN_DISABLE_SHIFT))
+		params->an_disable = AL_TRUE;
+	else
+		params->an_disable = AL_FALSE;
+
+	params->speed = AL_REG_FIELD_GET(reg,
+			AL_HAL_ETH_1G_SPEED_MASK,
+			AL_HAL_ETH_1G_SPEED_SHIFT);
+
+	if (AL_REG_BIT_GET(reg, AL_HAL_ETH_1G_HALF_DUPLEX_SHIFT))
+		params->half_duplex = AL_TRUE;
+	else
+		params->half_duplex = AL_FALSE;
+
+	if (AL_REG_BIT_GET(reg, AL_HAL_ETH_1G_FC_DISABLE_SHIFT))
+		params->fc_disable = AL_TRUE;
+	else
+		params->fc_disable = AL_FALSE;
+
+	if (AL_REG_BIT_GET(reg, AL_HAL_ETH_RETIMER_EXIST_SHIFT))
+		params->retimer_exist = AL_TRUE;
+	else
+		params->retimer_exist = AL_FALSE;
+
+	params->retimer_bus_id = AL_REG_FIELD_GET(reg,
+			AL_HAL_ETH_RETIMER_BUS_ID_MASK,
+			AL_HAL_ETH_RETIMER_BUS_ID_SHIFT);
+	params->retimer_i2c_addr = AL_REG_FIELD_GET(reg,
+			AL_HAL_ETH_RETIMER_I2C_ADDR_MASK,
+			AL_HAL_ETH_RETIMER_I2C_ADDR_SHIFT);
+
+	if (AL_REG_BIT_GET(reg, AL_HAL_ETH_RETIMER_CHANNEL_SHIFT))
+		params->retimer_channel = AL_ETH_RETIMER_CHANNEL_B;
+	else
+		params->retimer_channel = AL_ETH_RETIMER_CHANNEL_A;
+
+	params->dac_len = AL_REG_FIELD_GET(reg,
+			AL_HAL_ETH_DAC_LENGTH_MASK,
+			AL_HAL_ETH_DAC_LENGTH_SHIFT);
+
+	if (AL_REG_BIT_GET(reg, AL_HAL_ETH_DAC_SHIFT))
+		params->dac = AL_TRUE;
+	else
+		params->dac = AL_FALSE;
+
+	return 0;
+}
+
+/* Wake-On-Lan (WoL) */
+static inline void al_eth_byte_arr_to_reg(
+		uint32_t *reg, uint8_t *arr, unsigned int num_bytes)
+{
+	uint32_t mask = 0xff;
+	unsigned int i;
+
+	al_assert(num_bytes <= 4);
+
+	*reg = 0;
+
+	for (i = 0 ; i < num_bytes ; i++) {
+		/* place byte i at bit offset 8*i */
+		AL_REG_FIELD_SET(*reg, mask, (8 * i), arr[i]);
+		mask = mask << 8;
+	}
+}
+
+int al_eth_wol_enable(
+		struct al_hal_eth_adapter *adapter,
+		struct al_eth_wol_params *wol)
+{
+	uint32_t reg = 0;
+
+	if (wol->int_mask & AL_ETH_WOL_INT_MAGIC_PSWD) {
+		al_assert(wol->pswd != NULL);
+
+		al_eth_byte_arr_to_reg(&reg, &wol->pswd[0], 4);
+		al_reg_write32(&adapter->ec_regs_base->wol.magic_pswd_l, reg);
+
+		al_eth_byte_arr_to_reg(&reg, &wol->pswd[4], 2);
+		al_reg_write32(&adapter->ec_regs_base->wol.magic_pswd_h, reg);
+	}
+
+	if (wol->int_mask & AL_ETH_WOL_INT_IPV4) {
+		al_assert(wol->ipv4 != NULL);
+
+		al_eth_byte_arr_to_reg(&reg, &wol->ipv4[0], 4);
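+		/*
+		 * al_eth_byte_arr_to_reg() packs bytes LSB-first: for
+		 * wol->ipv4 = {192, 168, 0, 1} the value written below is
+		 * 0x0100a8c0 (192 in bits 7:0, 1 in bits 31:24).
+		 */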
+		al_reg_write32(&adapter->ec_regs_base->wol.ipv4_dip, reg);
+	}
+
+	if (wol->int_mask & AL_ETH_WOL_INT_IPV6) {
+		al_assert(wol->ipv6 != NULL);
+
+		al_eth_byte_arr_to_reg(&reg, &wol->ipv6[0], 4);
+		al_reg_write32(&adapter->ec_regs_base->wol.ipv6_dip_word0, reg);
+
+		al_eth_byte_arr_to_reg(&reg, &wol->ipv6[4], 4);
+		al_reg_write32(&adapter->ec_regs_base->wol.ipv6_dip_word1, reg);
+
+		al_eth_byte_arr_to_reg(&reg, &wol->ipv6[8], 4);
+		al_reg_write32(&adapter->ec_regs_base->wol.ipv6_dip_word2, reg);
+
+		al_eth_byte_arr_to_reg(&reg, &wol->ipv6[12], 4);
+		al_reg_write32(&adapter->ec_regs_base->wol.ipv6_dip_word3, reg);
+	}
+
+	if (wol->int_mask &
+		(AL_ETH_WOL_INT_ETHERTYPE_BC | AL_ETH_WOL_INT_ETHERTYPE_DA)) {
+
+		reg = ((uint32_t)wol->ethr_type2 << 16);
+		reg |= wol->ethr_type1;
+
+		al_reg_write32(&adapter->ec_regs_base->wol.ethertype, reg);
+	}
+
+	/* make sure we don't forward packets that don't also raise an interrupt */
+	al_assert((wol->forward_mask | wol->int_mask) == wol->int_mask);
+
+	reg = ((uint32_t)wol->forward_mask << 16);
+	reg |= wol->int_mask;
+	al_reg_write32(&adapter->ec_regs_base->wol.wol_en, reg);
+
+	return 0;
+}
+
+int al_eth_wol_disable(
+		struct al_hal_eth_adapter *adapter)
+{
+	al_reg_write32(&adapter->ec_regs_base->wol.wol_en, 0);
+
+	return 0;
+}
+
+int al_eth_tx_fwd_vid_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx,
+				uint8_t udma_mask, al_bool fwd_to_mac)
+{
+	uint32_t val = 0;
+	al_assert(idx < AL_ETH_FWD_VID_TABLE_NUM); /* valid VID index */
+	AL_REG_FIELD_SET(val, AL_ETH_TX_VLAN_TABLE_UDMA_MASK, 0, udma_mask);
+	AL_REG_FIELD_SET(val, AL_ETH_TX_VLAN_TABLE_FWD_TO_MAC, 4, fwd_to_mac);
+
+	al_reg_write32(&adapter->ec_regs_base->tfw.tx_vid_table_addr, idx);
+	al_reg_write32(&adapter->ec_regs_base->tfw.tx_vid_table_data, val);
+	return 0;
+}
+
+int al_eth_tx_generic_crc_entry_set(struct al_hal_eth_adapter *adapter, uint32_t idx,
+				struct al_eth_tx_gpd_cam_entry *tx_gpd_entry,
+				struct al_eth_tx_gcp_table_entry *tx_gcp_entry,
+				struct al_eth_tx_crc_chksum_replace_cmd_for_protocol_num_entry *tx_replace_entry)
+{
+	uint64_t gpd_data;
+	uint64_t gpd_mask;
+	uint32_t gcp_table_gen;
+	uint32_t tx_alu_opcode;
+	uint32_t tx_alu_opsel;
+	uint32_t replace_table_address;
+	uint32_t tx_replace_cmd;
+
+	gpd_data = ((uint64_t)tx_gpd_entry->l3_proto_idx & AL_ETH_TX_GPD_L3_PROTO_MASK) <<
+		AL_ETH_TX_GPD_L3_PROTO_SHIFT;
+	gpd_data |= ((uint64_t)tx_gpd_entry->l4_proto_idx & AL_ETH_TX_GPD_L4_PROTO_MASK) <<
+		AL_ETH_TX_GPD_L4_PROTO_SHIFT;
+	gpd_data |= ((uint64_t)tx_gpd_entry->tunnel_control & AL_ETH_TX_GPD_TUNNEL_CTRL_MASK) <<
+		AL_ETH_TX_GPD_TUNNEL_CTRL_SHIFT;
+	gpd_data |= ((uint64_t)tx_gpd_entry->source_vlan_count & AL_ETH_TX_GPD_SRC_VLAN_CNT_MASK) <<
+		AL_ETH_TX_GPD_SRC_VLAN_CNT_SHIFT;
+	gpd_mask = ((uint64_t)tx_gpd_entry->l3_proto_idx_mask & AL_ETH_TX_GPD_L3_PROTO_MASK) <<
+		AL_ETH_TX_GPD_L3_PROTO_SHIFT;
+	gpd_mask |= ((uint64_t)tx_gpd_entry->l4_proto_idx_mask & AL_ETH_TX_GPD_L4_PROTO_MASK) <<
+		AL_ETH_TX_GPD_L4_PROTO_SHIFT;
+	gpd_mask |= ((uint64_t)tx_gpd_entry->tunnel_control_mask & AL_ETH_TX_GPD_TUNNEL_CTRL_MASK) <<
+		AL_ETH_TX_GPD_TUNNEL_CTRL_SHIFT;
+	gpd_mask |= ((uint64_t)tx_gpd_entry->source_vlan_count_mask & AL_ETH_TX_GPD_SRC_VLAN_CNT_MASK) <<
+		AL_ETH_TX_GPD_SRC_VLAN_CNT_SHIFT;
+
+	/* Tx Generic protocol detect Cam compare table */
+	al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gpd_cam_addr, idx);
+	al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gpd_cam_ctrl,
+		(uint32_t)((tx_gpd_entry->tx_gpd_cam_ctrl) << AL_ETH_TX_GPD_CAM_CTRL_VALID_SHIFT));
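+	/*
+	 * CAM programming model used above and below: the entry index is
+	 * written to tx_gpd_cam_addr first, then the control, mask and data
+	 * registers; the 64-bit key/mask built above is split into two
+	 * 32-bit halves at AL_ETH_TX_GPD_CAM_DATA_2_SHIFT (presumably 32).
+	 */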
al_dbg("al_eth_tx_generic_crc_entry_set, line [%d], tx_gpd_cam_ctrl: %#x", idx, tx_gpd_entry->tx_gpd_cam_ctrl); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gpd_cam_mask_2, + (uint32_t)(gpd_mask >> AL_ETH_TX_GPD_CAM_MASK_2_SHIFT)); + al_dbg("al_eth_tx_generic_crc_entry_set, line [%d], tx_gpd_cam_mask_2: %#x", idx, (uint32_t)(gpd_mask >> AL_ETH_TX_GPD_CAM_MASK_2_SHIFT)); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gpd_cam_mask_1, + (uint32_t)(gpd_mask)); + al_dbg("al_eth_tx_generic_crc_entry_set, line [%d], tx_gpd_cam_mask_1: %#x", idx, (uint32_t)(gpd_mask)); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gpd_cam_data_2, + (uint32_t)(gpd_data >> AL_ETH_TX_GPD_CAM_DATA_2_SHIFT)); + al_dbg("al_eth_tx_generic_crc_entry_set, line [%d], tx_gpd_cam_data_2: %#x", idx, (uint32_t)(gpd_data >> AL_ETH_TX_GPD_CAM_DATA_2_SHIFT)); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gpd_cam_data_1, + (uint32_t)(gpd_data)); + al_dbg("al_eth_tx_generic_crc_entry_set, line [%d], tx_gpd_cam_data_1: %#x", idx, (uint32_t)(gpd_data)); + + gcp_table_gen = (tx_gcp_entry->poly_sel & AL_ETH_TX_GCP_POLY_SEL_MASK) << + AL_ETH_TX_GCP_POLY_SEL_SHIFT; + gcp_table_gen |= (tx_gcp_entry->crc32_bit_comp & AL_ETH_TX_GCP_CRC32_BIT_COMP_MASK) << + AL_ETH_TX_GCP_CRC32_BIT_COMP_SHIFT; + gcp_table_gen |= (tx_gcp_entry->crc32_bit_swap & AL_ETH_TX_GCP_CRC32_BIT_SWAP_MASK) << + AL_ETH_TX_GCP_CRC32_BIT_SWAP_SHIFT; + gcp_table_gen |= (tx_gcp_entry->crc32_byte_swap & AL_ETH_TX_GCP_CRC32_BYTE_SWAP_MASK) << + AL_ETH_TX_GCP_CRC32_BYTE_SWAP_SHIFT; + gcp_table_gen |= (tx_gcp_entry->data_bit_swap & AL_ETH_TX_GCP_DATA_BIT_SWAP_MASK) << + AL_ETH_TX_GCP_DATA_BIT_SWAP_SHIFT; + gcp_table_gen |= (tx_gcp_entry->data_byte_swap & AL_ETH_TX_GCP_DATA_BYTE_SWAP_MASK) << + AL_ETH_TX_GCP_DATA_BYTE_SWAP_SHIFT; + gcp_table_gen |= (tx_gcp_entry->trail_size & AL_ETH_TX_GCP_TRAIL_SIZE_MASK) << + AL_ETH_TX_GCP_TRAIL_SIZE_SHIFT; + gcp_table_gen |= (tx_gcp_entry->head_size & AL_ETH_TX_GCP_HEAD_SIZE_MASK) << + AL_ETH_TX_GCP_HEAD_SIZE_SHIFT; + gcp_table_gen |= (tx_gcp_entry->head_calc & AL_ETH_TX_GCP_HEAD_CALC_MASK) << + AL_ETH_TX_GCP_HEAD_CALC_SHIFT; + gcp_table_gen |= (tx_gcp_entry->mask_polarity & AL_ETH_TX_GCP_MASK_POLARITY_MASK) << + AL_ETH_TX_GCP_MASK_POLARITY_SHIFT; + al_dbg("al_eth_tx_generic_crc_entry_set, line [%d], gcp_table_gen: %#x", idx, gcp_table_gen); + + tx_alu_opcode = (tx_gcp_entry->tx_alu_opcode_1 & AL_ETH_TX_GCP_OPCODE_1_MASK) << + AL_ETH_TX_GCP_OPCODE_1_SHIFT; + tx_alu_opcode |= (tx_gcp_entry->tx_alu_opcode_2 & AL_ETH_TX_GCP_OPCODE_2_MASK) << + AL_ETH_TX_GCP_OPCODE_2_SHIFT; + tx_alu_opcode |= (tx_gcp_entry->tx_alu_opcode_3 & AL_ETH_TX_GCP_OPCODE_3_MASK) << + AL_ETH_TX_GCP_OPCODE_3_SHIFT; + tx_alu_opsel = (tx_gcp_entry->tx_alu_opsel_1 & AL_ETH_TX_GCP_OPSEL_1_MASK) << + AL_ETH_TX_GCP_OPSEL_1_SHIFT; + tx_alu_opsel |= (tx_gcp_entry->tx_alu_opsel_2 & AL_ETH_TX_GCP_OPSEL_2_MASK) << + AL_ETH_TX_GCP_OPSEL_2_SHIFT; + tx_alu_opsel |= (tx_gcp_entry->tx_alu_opsel_3 & AL_ETH_TX_GCP_OPSEL_3_MASK) << + AL_ETH_TX_GCP_OPSEL_3_SHIFT; + tx_alu_opsel |= (tx_gcp_entry->tx_alu_opsel_4 & AL_ETH_TX_GCP_OPSEL_4_MASK) << + AL_ETH_TX_GCP_OPSEL_4_SHIFT; + + /* Tx Generic crc prameters table general */ + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gcp_table_addr, idx); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gcp_table_gen, + gcp_table_gen); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gcp_table_mask_1, + tx_gcp_entry->gcp_mask[0]); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gcp_table_mask_2, + tx_gcp_entry->gcp_mask[1]); + 
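+	/*
+	 * gcp_mask[] spans six 32-bit words.  Judging by the per-protocol
+	 * values in al_eth_generic_tx_crc_gcp below, it selects header bits
+	 * that are masked before the CRC is computed, e.g. the RoCE entries
+	 * cover the GRH/BTH fields the InfiniBand ICRC treats as all-ones.
+	 */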
al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gcp_table_mask_3, + tx_gcp_entry->gcp_mask[2]); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gcp_table_mask_4, + tx_gcp_entry->gcp_mask[3]); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gcp_table_mask_5, + tx_gcp_entry->gcp_mask[4]); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gcp_table_mask_6, + tx_gcp_entry->gcp_mask[5]); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gcp_table_crc_init, + tx_gcp_entry->crc_init); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gcp_table_res, + tx_gcp_entry->gcp_table_res); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gcp_table_alu_opcode, + tx_alu_opcode); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gcp_table_alu_opsel, + tx_alu_opsel); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gcp_table_alu_val, + tx_gcp_entry->alu_val); + + /* Tx crc_chksum_replace_cmd */ + replace_table_address = L4_CHECKSUM_DIS_AND_L3_CHECKSUM_DIS | idx; + tx_replace_cmd = (uint32_t)(tx_replace_entry->l3_csum_en_00) << 0; + tx_replace_cmd |= (uint32_t)(tx_replace_entry->l4_csum_en_00) << 1; + tx_replace_cmd |= (uint32_t)(tx_replace_entry->crc_en_00) << 2; + al_reg_write32(&adapter->ec_regs_base->tfw_v3.crc_csum_replace_table_addr, replace_table_address); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.crc_csum_replace_table, + tx_replace_cmd); + replace_table_address = L4_CHECKSUM_DIS_AND_L3_CHECKSUM_EN | idx; + tx_replace_cmd = (uint32_t)(tx_replace_entry->l3_csum_en_01) << 0; + tx_replace_cmd |= (uint32_t)(tx_replace_entry->l4_csum_en_01) << 1; + tx_replace_cmd |= (uint32_t)(tx_replace_entry->crc_en_01) << 2; + al_reg_write32(&adapter->ec_regs_base->tfw_v3.crc_csum_replace_table_addr, replace_table_address); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.crc_csum_replace_table, + tx_replace_cmd); + replace_table_address = L4_CHECKSUM_EN_AND_L3_CHECKSUM_DIS | idx; + tx_replace_cmd = (uint32_t)(tx_replace_entry->l3_csum_en_10) << 0; + tx_replace_cmd |= (uint32_t)(tx_replace_entry->l4_csum_en_10) << 1; + tx_replace_cmd |= (uint32_t)(tx_replace_entry->crc_en_10) << 2; + al_reg_write32(&adapter->ec_regs_base->tfw_v3.crc_csum_replace_table_addr, replace_table_address); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.crc_csum_replace_table, + tx_replace_cmd); + replace_table_address = L4_CHECKSUM_EN_AND_L3_CHECKSUM_EN | idx; + tx_replace_cmd = (uint32_t)(tx_replace_entry->l3_csum_en_11) << 0; + tx_replace_cmd |= (uint32_t)(tx_replace_entry->l4_csum_en_11) << 1; + tx_replace_cmd |= (uint32_t)(tx_replace_entry->crc_en_11) << 2; + al_reg_write32(&adapter->ec_regs_base->tfw_v3.crc_csum_replace_table_addr, replace_table_address); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.crc_csum_replace_table, + tx_replace_cmd); + + return 0; +} + +int al_eth_rx_generic_crc_entry_set(struct al_hal_eth_adapter *adapter, uint32_t idx, + struct al_eth_rx_gpd_cam_entry *rx_gpd_entry, + struct al_eth_rx_gcp_table_entry *rx_gcp_entry) +{ + uint64_t gpd_data; + uint64_t gpd_mask; + uint32_t gcp_table_gen; + uint32_t rx_alu_opcode; + uint32_t rx_alu_opsel; + + gpd_data = ((uint64_t)rx_gpd_entry->outer_l3_proto_idx & AL_ETH_RX_GPD_OUTER_L3_PROTO_MASK) << + AL_ETH_RX_GPD_OUTER_L3_PROTO_SHIFT; + gpd_data |= ((uint64_t)rx_gpd_entry->outer_l4_proto_idx & AL_ETH_RX_GPD_OUTER_L4_PROTO_MASK) << + AL_ETH_RX_GPD_OUTER_L4_PROTO_SHIFT; + gpd_data |= ((uint64_t)rx_gpd_entry->inner_l3_proto_idx & AL_ETH_RX_GPD_INNER_L3_PROTO_MASK) << + AL_ETH_RX_GPD_INNER_L3_PROTO_SHIFT; + gpd_data |= ((uint64_t)rx_gpd_entry->inner_l4_proto_idx 
& AL_ETH_RX_GPD_INNER_L4_PROTO_MASK) << + AL_ETH_RX_GPD_INNER_L4_PROTO_SHIFT; + gpd_data |= ((uint64_t)rx_gpd_entry->parse_ctrl & AL_ETH_RX_GPD_OUTER_PARSE_CTRL_MASK) << + AL_ETH_RX_GPD_OUTER_PARSE_CTRL_SHIFT; + gpd_data |= ((uint64_t)rx_gpd_entry->outer_l3_len & AL_ETH_RX_GPD_INNER_PARSE_CTRL_MASK) << + AL_ETH_RX_GPD_INNER_PARSE_CTRL_SHIFT; + gpd_data |= ((uint64_t)rx_gpd_entry->l3_priority & AL_ETH_RX_GPD_L3_PRIORITY_MASK) << + AL_ETH_RX_GPD_L3_PRIORITY_SHIFT; + gpd_data |= ((uint64_t)rx_gpd_entry->l4_dst_port_lsb & AL_ETH_RX_GPD_L4_DST_PORT_LSB_MASK) << + AL_ETH_RX_GPD_L4_DST_PORT_LSB_SHIFT; + + gpd_mask = ((uint64_t)rx_gpd_entry->outer_l3_proto_idx_mask & AL_ETH_RX_GPD_OUTER_L3_PROTO_MASK) << + AL_ETH_RX_GPD_OUTER_L3_PROTO_SHIFT; + gpd_mask |= ((uint64_t)rx_gpd_entry->outer_l4_proto_idx_mask & AL_ETH_RX_GPD_OUTER_L4_PROTO_MASK) << + AL_ETH_RX_GPD_OUTER_L4_PROTO_SHIFT; + gpd_mask |= ((uint64_t)rx_gpd_entry->inner_l3_proto_idx_mask & AL_ETH_RX_GPD_INNER_L3_PROTO_MASK) << + AL_ETH_RX_GPD_INNER_L3_PROTO_SHIFT; + gpd_mask |= ((uint64_t)rx_gpd_entry->inner_l4_proto_idx_mask & AL_ETH_RX_GPD_INNER_L4_PROTO_MASK) << + AL_ETH_RX_GPD_INNER_L4_PROTO_SHIFT; + gpd_mask |= ((uint64_t)rx_gpd_entry->parse_ctrl_mask & AL_ETH_RX_GPD_OUTER_PARSE_CTRL_MASK) << + AL_ETH_RX_GPD_OUTER_PARSE_CTRL_SHIFT; + gpd_mask |= ((uint64_t)rx_gpd_entry->outer_l3_len_mask & AL_ETH_RX_GPD_INNER_PARSE_CTRL_MASK) << + AL_ETH_RX_GPD_INNER_PARSE_CTRL_SHIFT; + gpd_mask |= ((uint64_t)rx_gpd_entry->l3_priority_mask & AL_ETH_RX_GPD_L3_PRIORITY_MASK) << + AL_ETH_RX_GPD_L3_PRIORITY_SHIFT; + gpd_mask |= ((uint64_t)rx_gpd_entry->l4_dst_port_lsb_mask & AL_ETH_RX_GPD_L4_DST_PORT_LSB_MASK) << + AL_ETH_RX_GPD_L4_DST_PORT_LSB_SHIFT; + + /* Rx Generic protocol detect Cam compare table */ + al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gpd_cam_addr, idx); + al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gpd_cam_ctrl, + (uint32_t)((rx_gpd_entry->rx_gpd_cam_ctrl) << AL_ETH_RX_GPD_CAM_CTRL_VALID_SHIFT)); + al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gpd_cam_mask_2, + (uint32_t)(gpd_mask >> AL_ETH_RX_GPD_CAM_MASK_2_SHIFT)); + al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gpd_cam_mask_1, + (uint32_t)(gpd_mask)); + al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gpd_cam_data_2, + (uint32_t)(gpd_data >> AL_ETH_RX_GPD_CAM_DATA_2_SHIFT)); + al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gpd_cam_data_1, + (uint32_t)(gpd_data)); + + gcp_table_gen = (rx_gcp_entry->poly_sel & AL_ETH_RX_GCP_POLY_SEL_MASK) << + AL_ETH_RX_GCP_POLY_SEL_SHIFT; + gcp_table_gen |= (rx_gcp_entry->crc32_bit_comp & AL_ETH_RX_GCP_CRC32_BIT_COMP_MASK) << + AL_ETH_RX_GCP_CRC32_BIT_COMP_SHIFT; + gcp_table_gen |= (rx_gcp_entry->crc32_bit_swap & AL_ETH_RX_GCP_CRC32_BIT_SWAP_MASK) << + AL_ETH_RX_GCP_CRC32_BIT_SWAP_SHIFT; + gcp_table_gen |= (rx_gcp_entry->crc32_byte_swap & AL_ETH_RX_GCP_CRC32_BYTE_SWAP_MASK) << + AL_ETH_RX_GCP_CRC32_BYTE_SWAP_SHIFT; + gcp_table_gen |= (rx_gcp_entry->data_bit_swap & AL_ETH_RX_GCP_DATA_BIT_SWAP_MASK) << + AL_ETH_RX_GCP_DATA_BIT_SWAP_SHIFT; + gcp_table_gen |= (rx_gcp_entry->data_byte_swap & AL_ETH_RX_GCP_DATA_BYTE_SWAP_MASK) << + AL_ETH_RX_GCP_DATA_BYTE_SWAP_SHIFT; + gcp_table_gen |= (rx_gcp_entry->trail_size & AL_ETH_RX_GCP_TRAIL_SIZE_MASK) << + AL_ETH_RX_GCP_TRAIL_SIZE_SHIFT; + gcp_table_gen |= (rx_gcp_entry->head_size & AL_ETH_RX_GCP_HEAD_SIZE_MASK) << + AL_ETH_RX_GCP_HEAD_SIZE_SHIFT; + gcp_table_gen |= (rx_gcp_entry->head_calc & AL_ETH_RX_GCP_HEAD_CALC_MASK) << + AL_ETH_RX_GCP_HEAD_CALC_SHIFT; + gcp_table_gen |= 
(rx_gcp_entry->mask_polarity & AL_ETH_RX_GCP_MASK_POLARITY_MASK) <<
+			AL_ETH_RX_GCP_MASK_POLARITY_SHIFT;
+
+	rx_alu_opcode = (rx_gcp_entry->rx_alu_opcode_1 & AL_ETH_RX_GCP_OPCODE_1_MASK) <<
+			AL_ETH_RX_GCP_OPCODE_1_SHIFT;
+	rx_alu_opcode |= (rx_gcp_entry->rx_alu_opcode_2 & AL_ETH_RX_GCP_OPCODE_2_MASK) <<
+			AL_ETH_RX_GCP_OPCODE_2_SHIFT;
+	rx_alu_opcode |= (rx_gcp_entry->rx_alu_opcode_3 & AL_ETH_RX_GCP_OPCODE_3_MASK) <<
+			AL_ETH_RX_GCP_OPCODE_3_SHIFT;
+	rx_alu_opsel = (rx_gcp_entry->rx_alu_opsel_1 & AL_ETH_RX_GCP_OPSEL_1_MASK) <<
+			AL_ETH_RX_GCP_OPSEL_1_SHIFT;
+	rx_alu_opsel |= (rx_gcp_entry->rx_alu_opsel_2 & AL_ETH_RX_GCP_OPSEL_2_MASK) <<
+			AL_ETH_RX_GCP_OPSEL_2_SHIFT;
+	rx_alu_opsel |= (rx_gcp_entry->rx_alu_opsel_3 & AL_ETH_RX_GCP_OPSEL_3_MASK) <<
+			AL_ETH_RX_GCP_OPSEL_3_SHIFT;
+	rx_alu_opsel |= (rx_gcp_entry->rx_alu_opsel_4 & AL_ETH_RX_GCP_OPSEL_4_MASK) <<
+			AL_ETH_RX_GCP_OPSEL_4_SHIFT;
+
+	/* Rx Generic crc parameters table general */
+	al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gcp_table_addr, idx);
+	al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gcp_table_gen,
+			gcp_table_gen);
+	al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gcp_table_mask_1,
+			rx_gcp_entry->gcp_mask[0]);
+	al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gcp_table_mask_2,
+			rx_gcp_entry->gcp_mask[1]);
+	al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gcp_table_mask_3,
+			rx_gcp_entry->gcp_mask[2]);
+	al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gcp_table_mask_4,
+			rx_gcp_entry->gcp_mask[3]);
+	al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gcp_table_mask_5,
+			rx_gcp_entry->gcp_mask[4]);
+	al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gcp_table_mask_6,
+			rx_gcp_entry->gcp_mask[5]);
+	al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gcp_table_crc_init,
+			rx_gcp_entry->crc_init);
+	al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gcp_table_res,
+			rx_gcp_entry->gcp_table_res);
+	al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gcp_table_alu_opcode,
+			rx_alu_opcode);
+	al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gcp_table_alu_opsel,
+			rx_alu_opsel);
+	al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gcp_table_alu_val,
+			rx_gcp_entry->alu_val);
+	return 0;
+}
+
+
+#define AL_ETH_TX_GENERIC_CRC_ENTRIES_NUM 7
+#define AL_ETH_RX_GENERIC_CRC_ENTRIES_NUM 31
+
+static struct al_eth_tx_gpd_cam_entry al_eth_generic_tx_crc_gpd[AL_ETH_TX_GENERIC_CRC_ENTRIES_NUM] = {
+
+	/* [0] roce (with grh, bth) */
+	{22, 0, 0, 0, 1,
+	 0x1f, 0x0, 0x0, 0x0, },
+	/* [1] fcoe */
+	{21, 0, 0, 0, 1,
+	 0x1f, 0x0, 0x0, 0x0, },
+	/* [2] routable_roce that is referred to as l4_protocol, over IPV4 (and udp) */
+	{8, 23, 0, 0, 1,
+	 0x1f, 0x1f, 0x0, 0x0, },
+	/* [3] routable_roce that is referred to as l4_protocol, over IPV6 (and udp) */
+	{11, 23, 0, 0, 1,
+	 0x1f, 0x1f, 0x0, 0x0, },
+	/* [4] routable_roce that is referred to as tunneled_packet, over outer IPV4 and udp */
+	{23, 0, 5, 0, 1,
+	 0x1f, 0x0, 0x5, 0x0, },
+	/* [5] routable_roce that is referred to as tunneled_packet, over outer IPV6 and udp */
+	{23, 0, 3, 0, 1,
+	 0x1f, 0x0, 0x5, 0x0 },
+	/* [6] default match */
+	{0, 0, 0, 0, 1,
+	 0x0, 0x0, 0x0, 0x0 }
+};
+
+static struct al_eth_tx_gcp_table_entry al_eth_generic_tx_crc_gcp[AL_ETH_TX_GENERIC_CRC_ENTRIES_NUM] = {
+
+	/* [0] roce (with grh, bth) */
+	{0, 1, 1, 0, 1,
+	 0, 4, 8, 0, 1,
+	 0, 0, 0, 0, 0,
+	 0, 0, {0xffff7f03, 0x00000000, 0x00000000,
+	 0x00c00000, 0x00000000, 0x00000000}, 0xffffffff, 0x20,
+	 0},
+	/* [1] fcoe */
+	{0, 1, 0, 0, 1,
+	 0, 8, 14, 1, 1,
+	 0, 0, 0, 0, 0,
+	 0, 0, {0x00000000, 0x00000000, 0x00000000,
+	 0x00000000,
0x00000000, 0x00000000}, 0xffffffff, 0x20, + 0}, + /* [2] routable_roce that is refered as l4_protocol, over IPV4 (and udp) */ + {0, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 0, 0, 0, 0, 0, + 0, 0, {0x3000cf00, 0x00000f00, 0xc0000000, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x20, + 0}, + /* [3] routable_roce that is refered as l4_protocol, over IPV6 (and udp) */ + {0, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 0, 0, 0, 0, 0, + 0, 0, {0x7f030000, 0x00000000, 0x000f00c0, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x20, + 0}, + /* [4] routable_roce that is refered as tunneled_packet, over outer IPV4 and udp */ + {0, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 2, 0, 0, 0, 10, + 0, 0, {0x3000cf00, 0x00000f00, 0xc0000000, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x20, + 28}, + /* [5] routable_roce that is refered as tunneled_packet, over outer IPV6 and udp */ + {0, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 2, 0, 0, 0, 10, + 0, 0, {0x7f030000, 0x00000000, 0x000f00c0, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x20, + 48}, + /* [6] default match */ + {0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0x00000000, 0x0, + 0} +}; + +static struct al_eth_tx_crc_chksum_replace_cmd_for_protocol_num_entry al_eth_tx_crc_chksum_replace_cmd[AL_ETH_TX_GENERIC_CRC_ENTRIES_NUM] = { + + /* [0] roce (with grh, bth) */ + {0,1,0,1, 0,0,0,0, 0,0,0,0}, + /* [1] fcoe */ + {0,1,0,1, 0,0,0,0, 0,0,0,0}, + /* [2] routable_roce that is refered as l4_protocol, over IPV4 (and udp) */ + {0,0,1,1, 0,0,0,0, 0,1,0,1}, + /* [3] routable_roce that is refered as l4_protocol, over IPV6 (and udp) */ + {0,0,1,1, 0,0,0,0, 0,0,0,0}, + /* [4] routable_roce that is refered as tunneled_packet, over outer IPV4 and udp */ + {0,1,0,1, 0,0,0,0, 0,0,0,0}, + /* [5] routable_roce that is refered as tunneled_packet, over outer IPV6 and udp */ + {0,1,0,1, 0,0,0,0, 0,0,0,0}, + /* [6] default match */ + {0,0,0,0, 0,0,1,1, 0,1,0,1} +}; + +static struct al_eth_rx_gpd_cam_entry al_eth_generic_rx_crc_gpd[AL_ETH_RX_GENERIC_CRC_ENTRIES_NUM] = { + + /* [0] roce (with grh, bth) */ + {22, 0, 0, 0, + 0, 0, 0, 0, 1, + 0x1f, 0x0, 0x0, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [1] fcoe */ + {21, 0, 0, 0, + 0, 0, 0, 0, 1, + 0x1f, 0x0, 0x0, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [2] routable_roce that is refered as l4_protocol, over IPV4 (and udp) */ + {8, 23, 0, 0, + 0, 0, 0, 0, 1, + 0x1f, 0x1f, 0x0, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [3] routable_roce that is refered as l4_protocol, over IPV6 (and udp) */ + {11, 23, 0, 0, + 0, 0, 0, 0, 1, + 0x1f, 0x1f, 0x0, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [4] routable_roce that is refered as tunneled_packet, over outer IPV4 and udp */ + {8, 13, 23, 0, + 0, 0, 0, 0, 1, + 0x1f, 0x1f, 0x1f, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [5] routable_roce that is refered as tunneled_packet, over outer IPV6 and udp */ + {11, 13, 23, 0, + 0, 0, 0, 0, 1, + 0x1f, 0x1f, 0x1f, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [6] tunneled roce (with grh, bth) over GRE over IPV4 */ + {8, 14, 22, 0, + 4, 0, 0, 0, 1, + 0x1f, 0x1f, 0x1f, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [7] tunneled roce (with grh, bth) over GRE over IPV6 */ + {11, 14, 22, 0, + 4, 0, 0, 0, 1, + 0x1f, 0x1f, 0x1f, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [8] tunneled fcoe over IPV4 */ + {8, 0, 21, 0, + 4, 0, 0, 0, 1, + 0x1f, 0x0, 0x1f, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [9] tunneled fcoe over IPV6 */ + {11, 0, 21, 0, + 4, 0, 0, 0, 1, + 0x1f, 0x0, 0x1f, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [10] tunneled routable_roce that is refered as l4_protocol, over IPV4 (and udp) 
over IPV4 */ + {8, 0, 8, 23, + 4, 0, 0, 0, 1, + 0x1f, 0x0, 0x1f, 0x1f, + 0x4, 0x0, 0x0, 0x0}, + /* [11] tunneled routable_roce that is refered as l4_protocol, over IPV4 (and udp) over IPV6 */ + {11, 0, 8, 23, + 4, 0, 0, 0, 1, + 0x1f, 0x0, 0x1f, 0x1f, + 0x4, 0x0, 0x0, 0x0}, + /* [12] tunneled routable_roce that is refered as l4_protocol, over IPV6 (and udp) over IPV4 */ + {8, 0, 11, 23, + 4, 0, 0, 0, 1, + 0x1f, 0x0, 0x1f, 0x1f, + 0x4, 0x0, 0x0, 0x0}, + /* [13] tunneled routable_roce that is refered as l4_protocol, over IPV6 (and udp) over IPV6 */ + {11, 0, 11, 23, + 4, 0, 0, 0, 1, + 0x1f, 0x0, 0x1f, 0x1f, + 0x4, 0x0, 0x0, 0x0}, + /* [14] l3_pkt - IPV4 */ + {8, 0, 0, 0, + 0, 0, 0, 0, 1, + 0x1f, 0x1f, 0x0, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [15] l4_hdr over IPV4 */ + {8, 12, 0, 0, + 0, 0, 0, 0, 1, + 0x1f, 0x1e, 0x0, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [16] l3_pkt - IPV6 */ + {11, 0, 0, 0, + 0, 0, 0, 0, 1, + 0x1f, 0x1f, 0x0, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [17] l4_hdr over IPV6 */ + {11, 12, 0, 0, + 0, 0, 0, 0, 1, + 0x1f, 0x1e, 0x0, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [18] IPV4 over IPV4 */ + {8, 0, 8, 0, + 4, 0, 0, 0, 1, + 0x1f, 0x0, 0x1f, 0x1f, + 0x4, 0x0, 0x0, 0x0}, + /* [19] l4_hdr over IPV4 over IPV4 */ + {8, 0, 8, 12, + 4, 0, 0, 0, 1, + 0x1f, 0x0, 0x1f, 0x1e, + 0x4, 0x0, 0x0, 0x0}, + /* [20] IPV4 over IPV6 */ + {11, 0, 8, 0, + 4, 0, 0, 0, 1, + 0x1f, 0x0, 0x1f, 0x1f, + 0x4, 0x0, 0x0, 0x0}, + /* [21] l4_hdr over IPV4 over IPV6 */ + {11, 0, 8, 12, + 4, 0, 0, 0, 1, + 0x1f, 0x0, 0x1f, 0x1e, + 0x4, 0x0, 0x0, 0x0}, + /* [22] IPV6 over IPV4 */ + {8, 0, 11, 0, + 4, 0, 0, 0, 1, + 0x1f, 0x0, 0x1f, 0x1f, + 0x4, 0x0, 0x0, 0x0}, + /* [23] l4_hdr over IPV6 over IPV4 */ + {8, 0, 11, 12, + 4, 0, 0, 0, 1, + 0x1f, 0x0, 0x1f, 0x1e, + 0x4, 0x0, 0x0, 0x0}, + /* [24] IPV6 over IPV6 */ + {11, 0, 11, 0, + 4, 0, 0, 0, 1, + 0x1f, 0x0, 0x1f, 0x1f, + 0x4, 0x0, 0x0, 0x0}, + /* [25] l4_hdr over IPV6 over IPV6 */ + {11, 0, 11, 12, + 4, 0, 0, 0, 1, + 0x1f, 0x0, 0x1f, 0x1e, + 0x4, 0x0, 0x0, 0x0}, + /* [26] GENERIC_STORAGE_READ, over IPV4 (and udp) */ + {8, 2, 0, 0, + 0, 0, 0, 0, 1, + 0x1f, 0x1f, 0x0, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [27] GENERIC_STORAGE_READ, over IPV6 (and udp) */ + {11, 2, 0, 0, + 0, 0, 0, 0, 1, + 0x1f, 0x1f, 0x0, 0x0, + 0x4, 0x0, 0x0, 0x0}, + /* [28] tunneled GENERIC_STORAGE_READ over IPV4 (and udp) over IPV4/IPV6 */ + {8, 0, 8, 2, + 4, 0, 0, 0, 1, + 0x18, 0x0, 0x1f, 0x1f, + 0x4, 0x0, 0x0, 0x0}, + /* [29] tunneled GENERIC_STORAGE_READ over IPV6 (and udp) over IPV4/IPV6 */ + {8, 0, 11, 2, + 4, 0, 0, 0, 1, + 0x18, 0x0, 0x1f, 0x1f, + 0x4, 0x0, 0x0, 0x0}, + /* [30] default match */ + {0, 0, 0, 0, + 0, 0, 0, 0, 1, + 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0} +}; + +static struct al_eth_rx_gcp_table_entry al_eth_generic_rx_crc_gcp[AL_ETH_RX_GENERIC_CRC_ENTRIES_NUM] = { + + /* [0] roce (with grh, bth) */ + {0, 1, 1, 0, 1, + 0, 4, 8, 0, 1, + 0, 0, 0, 0, 0, + 0, 0, {0xffff7f03, 0x00000000, 0x00000000, + 0x00c00000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000010, + 0}, + /* [1] fcoe */ + {0, 1, 0, 0, 1, + 0, 8, 14, 1, 1, + 0, 0, 0, 0, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000010, + 0}, + /* [2] routable_roce that is refered as l4_protocol, over IPV4 (and udp) */ + {0, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 0, 0, 0, 0, 0, + 0, 0, {0x3000cf00, 0x00000f00, 0xc0000000, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000014, + 0}, + /* [3] routable_roce that is refered as l4_protocol, over IPV6 (and udp) */ + {0, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 0, 0, 0, 0, 0, + 
0, 0, {0x7f030000, 0x00000000, 0x000f00c0, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000010, + 0}, + /* [4] routable_roce that is refered as tunneled_packet, over outer IPV4 and udp */ + {0, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 2, 0, 0, 0, 10, + 0, 0, {0x3000cf00, 0x00000f00, 0xc0000000, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000014, + 28}, + /* [5] routable_roce that is refered as tunneled_packet, over outer IPV4 and udp */ + {0, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 2, 0, 0, 0, 10, + 0, 0, {0x7f030000, 0x00000000, 0x000f00c0, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000014, + 48}, + /* [6] tunneled roce (with grh, bth) over IPV4 */ + {0, 1, 1, 0, 1, + 0, 4, 8, 0, 1, + 0, 0, 0, 1, 0, + 0, 0, {0xffff7f03, 0x00000000, 0x00000000, + 0x00c00000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000014, + 0}, + /* [7] tunneled roce (with grh, bth) over IPV6 */ + {0, 1, 1, 0, 1, + 0, 4, 8, 0, 1, + 0, 0, 0, 1, 0, + 0, 0, {0xffff7f03, 0x00000000, 0x00000000, + 0x00c00000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000010, + 0}, + /* [8] tunneled fcoe over IPV4 */ + {0, 1, 0, 0, 1, + 0, 8, 14, 1, 1, + 0, 0, 0, 1, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000014, + 0}, + /* [9] tunneled fcoe over IPV6 */ + {0, 1, 0, 0, 1, + 0, 8, 14, 1, 1, + 0, 0, 0, 1, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000010, + 0}, + /* [10] tunneled routable_roce that is refered as l4_protocol, over IPV4 (and udp) over IPV4 */ + {0, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 0, 0, 0, 1, 0, + 0, 0, {0x3000cf00, 0x00000f00, 0xc0000000, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000015, + 0}, + /* [11] tunneled routable_roce that is refered as l4_protocol, over IPV4 (and udp) over IPV6 */ + {0, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 0, 0, 0, 1, 0, + 0, 0, {0x3000cf00, 0x00000f00, 0xc0000000, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000011, + 0}, + /* [12] tunneled routable_roce that is refered as l4_protocol, over IPV6 (and udp) over IPV4 */ + {0, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 0, 0, 0, 1, 0, + 0, 0, {0x7f030000, 0x00000000, 0x000f00c0, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000014, + 0}, + /* [13] tunneled routable_roce that is refered as l4_protocol, over IPV6 (and udp) over IPV6 */ + {0, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 0, 0, 0, 1, 0, + 0, 0, {0x7f030000, 0x00000000, 0x000f00c0, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000010, + 0}, + /* [14] l3_pkt - IPV4 */ + {0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0x00000000, 0x1, + 0}, + /* [15] l4_hdr over IPV4 */ + {0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0x00000000, 0x3, + 0}, + /* [16] l3_pkt - IPV6 */ + {0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0x00000000, 0x0, + 0}, + /* [17] l4_hdr over IPV6 */ + {0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0x00000000, 0x2, + 0}, + /* [18] IPV4 over IPV4 */ + {0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0x00000000, 0x5, + 0}, + /* [19] l4_hdr over IPV4 over IPV4 */ + {0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 
{0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0x00000000, 0x7, + 0}, + /* [20] IPV4 over IPV6 */ + {0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0x00000000, 0x1, + 0}, + /* [21] l4_hdr over IPV4 over IPV6 */ + {0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0x00000000, 0x3, + 0}, + /* [22] IPV6 over IPV4 */ + {0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0x00000000, 0x4, + 0}, + /* [23] l4_hdr over IPV6 over IPV4 */ + {0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0x00000000, 0x6, + 0}, + /* [24] IPV6 over IPV6 */ + {0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0x00000000, 0x0, + 0}, + /* [25] l4_hdr over IPV6 over IPV6 */ + {0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0x00000000, 0x2, + 0}, + /* [26] GENERIC_STORAGE_READ, over IPV4 (and udp) */ + {1, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 0, 0, 0, 2, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000014, + 0}, + /* [27] GENERIC_STORAGE_READ, over IPV6 (and udp) */ + {1, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 0, 0, 0, 2, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000010, + 0}, + /* [28] tunneled GENERIC_STORAGE_READ over IPV4 (and udp) over IPV4/IPV6 */ + {1, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 0, 0, 0, 3, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000011, + 0}, + /* [29] tunneled GENERIC_STORAGE_READ over IPV6 (and udp) over IPV4/IPV6 */ + {1, 1, 1, 0, 1, + 0, 4, 0, 0, 1, + 0, 0, 0, 3, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0xffffffff, 0x02000010, + 0}, + /* [30] default match */ + {0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, {0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000}, 0x00000000, 0x0, + 0} +}; + +int al_eth_generic_crc_init(struct al_hal_eth_adapter *adapter) +{ + int idx; + al_assert((adapter->rev_id > AL_ETH_REV_ID_2)); + + al_dbg("eth [%s]: enable tx_generic_crc\n", adapter->name); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.tx_gcp_legacy, 0x0); + al_reg_write32(&adapter->ec_regs_base->tfw_v3.crc_csum_replace, 0x0); + al_dbg("eth [%s]: enable rx_generic_crc\n", adapter->name); + al_reg_write32(&adapter->ec_regs_base->rfw_v3.rx_gcp_legacy, 0x0); + al_reg_write32(&adapter->ec_regs_base->rfw_v3.gpd_p1, AL_ETH_RX_GPD_PARSE_RESULT_OUTER_L3_PROTO_IDX_OFFSET); + al_reg_write32(&adapter->ec_regs_base->rfw_v3.gpd_p2, AL_ETH_RX_GPD_PARSE_RESULT_OUTER_L4_PROTO_IDX_OFFSET); + al_reg_write32(&adapter->ec_regs_base->rfw_v3.gpd_p3, AL_ETH_RX_GPD_PARSE_RESULT_INNER_L3_PROTO_IDX_OFFSET); + al_reg_write32(&adapter->ec_regs_base->rfw_v3.gpd_p4, AL_ETH_RX_GPD_PARSE_RESULT_INNER_L4_PROTO_IDX_OFFSET); + al_reg_write32(&adapter->ec_regs_base->rfw_v3.gpd_p5, AL_ETH_RX_GPD_PARSE_RESULT_OUTER_PARSE_CTRL); + al_reg_write32(&adapter->ec_regs_base->rfw_v3.gpd_p6, AL_ETH_RX_GPD_PARSE_RESULT_INNER_PARSE_CTRL); + al_reg_write32(&adapter->ec_regs_base->rfw_v3.gpd_p7, 
AL_ETH_RX_GPD_PARSE_RESULT_L3_PRIORITY);
+	al_reg_write32(&adapter->ec_regs_base->rfw_v3.gpd_p8, AL_ETH_RX_GPD_PARSE_RESULT_OUTER_L4_DST_PORT_LSB);
+
+	for (idx = 0; idx < AL_ETH_TX_GENERIC_CRC_ENTRIES_NUM; idx++)
+	{
+		al_eth_tx_generic_crc_entry_set(adapter, idx, &al_eth_generic_tx_crc_gpd[idx], &al_eth_generic_tx_crc_gcp[idx], &al_eth_tx_crc_chksum_replace_cmd[idx]);
+	}
+	for (idx = 0; idx < AL_ETH_RX_GENERIC_CRC_ENTRIES_NUM; idx++)
+	{
+		al_eth_rx_generic_crc_entry_set(adapter, idx, &al_eth_generic_rx_crc_gpd[idx], &al_eth_generic_rx_crc_gcp[idx]);
+	}
+	return 0;
+}
+
+/** @} end of Ethernet group */
+
diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_init_eth_kr.c b/target/linux/alpine/files/drivers/net/ethernet/al/al_init_eth_kr.c
new file mode 100644
index 00000000000000..a21e6781d2008e
--- /dev/null
+++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_init_eth_kr.c
@@ -0,0 +1,834 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/**
+ * Ethernet
+ * @{
+ * @file   al_init_eth_kr.c
+ *
+ * @brief  auto-negotiation and link training algorithms and state machines
+ *
+ * The link training algorithm implemented in this file iterates over the
+ * coefficients, looking for the best eye measurement achievable for each one
+ * of them. It uses a state machine to move between the different states.
+ * The state machine has 3 parts:
+ *	- preparation - wait until the link partner (lp) is ready and has
+ *			changed its state to preset.
+ *	- measurement (per coefficient) - issue decrements for the coefficient
+ *			under control until the eye measurement stops
+ *			improving and settles at the optimum.
+ *	- completion - indicate the receiver is ready and wait for the lp to
+ *			finish its work.
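+ *
+ * In sketch form (the state names below are from enum
+ * al_eth_kr_mac_lt_state), a measurement round for one coefficient
+ * roughly cycles through:
+ *
+ *	DO_HOLD -> QMEASURE -> QCHECK -> DO_NEXT_TRY -> DO_HOLD -> ...
+ *
+ * until QCHECK decides the optimum has been passed, at which point the
+ * END_STEPS / END_STEPS_HOLD states walk back to it and COEFF_DONE
+ * closes out the coefficient.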
+ */ + +#include +#include "al_init_eth_kr.h" + + +/* TODO: fix with more reasonable numbers */ +/* timeout in mSec before auto-negotiation will be terminated */ +#define AL_ETH_KR_AN_TIMEOUT (500) +#define AL_ETH_KR_EYE_MEASURE_TIMEOUT (100) +/* timeout in uSec before the process will be terminated */ +#define AL_ETH_KR_FRAME_LOCK_TIMEOUT (500 * 1000) +#define AL_ETH_KR_LT_DONE_TIMEOUT (500 * 1000) +/* number of times the receiver and transmitter tasks will be called before the + * algorithm will be terminated */ +#define AL_ETH_KR_LT_MAX_ROUNDS (50000) + +/* mac algorithm state machine */ +enum al_eth_kr_mac_lt_state { + TX_INIT = 0, /* start of all */ + WAIT_BEGIN, /* wait for initial training lock */ + DO_PRESET, /* issue PRESET to link partner */ + DO_HOLD, /* issue HOLD to link partner */ + /* preparation is done, start testing the coefficient. */ + QMEASURE, /* EyeQ measurement. */ + QCHECK, /* Check if measurement shows best value. */ + DO_NEXT_TRY, /* issue DEC command to coeff for next measurement. */ + END_STEPS, /* perform last steps to go back to optimum. */ + END_STEPS_HOLD, /* perform last steps HOLD command. */ + COEFF_DONE, /* done with the current coefficient updates. + * Check if another should be done. */ + /* end of training to all coefficients */ + SET_READY, /* indicate local receiver ready */ + TX_DONE /* transmit process completed, training can end. */ +}; + +static char *al_eth_kr_mac_sm_name[] = {"TX_INIT", "WAIT_BEGIN", "DO_PRESET", + "DO_HOLD", "QMEASURE", "QCHECK", + "DO_NEXT_TRY", "END_STEPS", + "END_STEPS_HOLD", "COEFF_DONE", + "SET_READY", "TX_DONE"}; + +/* constants used for the measurement. */ +enum al_eth_kr_coef { + AL_ETH_KR_COEF_C_MINUS, + AL_ETH_KR_COEF_C_ZERO, + AL_ETH_KR_COEF_C_PLUS, +}; + +/* + * test coefficients from COEFF_TO_MANIPULATE to COEFF_TO_MANIPULATE_LAST. + */ +#define COEFF_TO_MANIPULATE AL_ETH_KR_COEF_C_MINUS +#define COEFF_TO_MANIPULATE_LAST AL_ETH_KR_COEF_C_MINUS +#define QARRAY_SIZE 3 /**< how many entries we want in our history array. */ + +struct al_eth_kr_data { + struct al_hal_eth_adapter *adapter; + struct al_serdes_obj *serdes_obj; + enum al_serdes_group grp; + enum al_serdes_lane lane; + + /* receiver side data */ + struct al_eth_kr_status_report_data status_report; /* report to response */ + struct al_eth_kr_coef_up_data last_lpcoeff; /* last coeff received */ + + /* transmitter side data */ + enum al_eth_kr_mac_lt_state algo_state; /* statemachine. */ + unsigned int qarray[QARRAY_SIZE]; /*< EyeQ measurements history */ + unsigned int qarray_cnt; /*< how many entries in the array are + valid for compares yet. */ + enum al_eth_kr_coef curr_coeff; + unsigned int coeff_status_step; /*< status of coefficient during the + last DEC/INC command (before issuing + HOLD again). 
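+					 * QCHECK compares this against
+					 * C72_CSTATE_MIN to detect that the
+					 * remote cannot decrement this
+					 * coefficient any further.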
*/ + unsigned int end_steps_cnt; /*< number of end steps needed */ +}; + +static int al_eth_kr_an_run(struct al_eth_kr_data *kr_data, + struct al_eth_an_adv *an_adv, + struct al_eth_an_adv *an_partner_adv) +{ + int rc; + al_bool page_received = AL_FALSE; + al_bool an_completed = AL_FALSE; + al_bool error = AL_FALSE; + int timeout = AL_ETH_KR_AN_TIMEOUT; + + rc = al_eth_kr_an_init(kr_data->adapter, an_adv); + if (rc) { + al_err("%s %s autonegotiation init failed\n", + kr_data->adapter->name, __func__); + return rc; + } + + rc = al_eth_kr_an_start(kr_data->adapter, AL_TRUE); + if (rc) { + al_err("%s %s autonegotiation enable failed\n", + kr_data->adapter->name, __func__); + return rc; + } + + do { + al_msleep(10); + timeout -= 10; + if (timeout <= 0) { + al_info("%s %s autonegotiation failed on timeout\n", + kr_data->adapter->name, __func__); + + return -ETIMEDOUT; + } + + al_eth_kr_an_status_check(kr_data->adapter, &page_received, + &an_completed, &error); + } while (page_received == AL_FALSE); + + if (error != 0) { + al_info("%s %s autonegotiation failed (status error)\n", + kr_data->adapter->name, __func__); + + return -EIO; + } + + al_eth_kr_an_read_adv(kr_data->adapter, an_partner_adv); + + al_dbg("%s %s autonegotiation completed. error = %d\n", + kr_data->adapter->name, __func__, error); + + return 0; +} + +/***************************** receiver side *********************************/ +static enum al_eth_kr_cl72_cstate al_eth_lt_coeff_set( + struct al_eth_kr_data *kr_data, + enum al_serdes_tx_deemph_param param, + uint32_t op) +{ + enum al_eth_kr_cl72_cstate status = 0; + + switch (op) { + case AL_PHY_KR_COEF_UP_HOLD: + /* no need to update the serdes - return not updated*/ + status = C72_CSTATE_NOT_UPDATED; + break; + case AL_PHY_KR_COEF_UP_INC: + status = C72_CSTATE_UPDATED; + + if (!al_serdes_tx_deemph_inc(kr_data->serdes_obj, + kr_data->grp, + kr_data->lane, + param)) + status = C72_CSTATE_MAX; + + break; + case AL_PHY_KR_COEF_UP_DEC: + status = C72_CSTATE_UPDATED; + + if (!al_serdes_tx_deemph_dec(kr_data->serdes_obj, + kr_data->grp, + kr_data->lane, + param)) + status = C72_CSTATE_MIN; + + break; + default: /* 3=reserved */ + break; + } + + return status; +} + +/* + * Inspect the received coefficient update request and update all coefficients + * in the serdes accordingly. + */ +void al_eth_coeff_req_handle(struct al_eth_kr_data *kr_data, + struct al_eth_kr_coef_up_data *lpcoeff) +{ + struct al_eth_kr_status_report_data *report = &kr_data->status_report; + + /* First check for Init and Preset commands. */ + /* TODO: should initialize be different from preset */ + if (lpcoeff->preset || lpcoeff->initialize) { + al_serdes_tx_deemph_preset(kr_data->serdes_obj, + kr_data->grp, + kr_data->lane); + + /* in case of preset c(0) should be set to maximum and both c(1) + * and c(-1) should be updated */ + report->c_minus = C72_CSTATE_UPDATED; + + report->c_plus = C72_CSTATE_UPDATED; + + report->c_zero = C72_CSTATE_MAX; + + return; + } + + /* in case preset and initialize are false need to perform per + * coefficient action. 
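+	 *
+	 * Each of the three CL72 update requests maps onto one serdes
+	 * de-emphasis parameter (see the calls below):
+	 *	c(-1) -> AL_SERDES_TX_DEEMP_C_MINUS
+	 *	c(0)  -> AL_SERDES_TX_DEEMP_C_ZERO
+	 *	c(+1) -> AL_SERDES_TX_DEEMP_C_PLUS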
+	 */
+
+	report->c_minus = al_eth_lt_coeff_set(kr_data,
+					      AL_SERDES_TX_DEEMP_C_MINUS,
+					      lpcoeff->c_minus);
+
+	report->c_zero = al_eth_lt_coeff_set(kr_data,
+					     AL_SERDES_TX_DEEMP_C_ZERO,
+					     lpcoeff->c_zero);
+
+	report->c_plus = al_eth_lt_coeff_set(kr_data,
+					     AL_SERDES_TX_DEEMP_C_PLUS,
+					     lpcoeff->c_plus);
+
+	al_dbg("%s: c(0) = 0x%x c(-1) = 0x%x c(1) = 0x%x\n",
+	       __func__, report->c_zero, report->c_plus, report->c_minus);
+}
+
+
+void al_eth_kr_lt_receiver_task_init(struct al_eth_kr_data *kr_data)
+{
+	al_memset(&kr_data->last_lpcoeff, 0, sizeof(struct al_eth_kr_coef_up_data));
+	al_memset(&kr_data->status_report, 0, sizeof(struct al_eth_kr_status_report_data));
+}
+
+
+static al_bool al_eth_lp_coeff_up_change(struct al_eth_kr_data *kr_data,
+					 struct al_eth_kr_coef_up_data *lpcoeff)
+{
+	struct al_eth_kr_coef_up_data *last_lpcoeff = &kr_data->last_lpcoeff;
+
+	if (!al_memcmp(last_lpcoeff, lpcoeff, sizeof(struct al_eth_kr_coef_up_data)))
+		return AL_FALSE;
+
+	al_memcpy(last_lpcoeff, lpcoeff, sizeof(struct al_eth_kr_coef_up_data));
+	return AL_TRUE;
+
+}
+
+/**
+ * Run the receiver task for one cycle.
+ * The receiver task continuously inspects the received coefficient update
+ * requests and acts upon them.
+ *
+ * @return <0 if an error occurs
+ */
+int al_eth_kr_lt_receiver_task_run(struct al_eth_kr_data *kr_data)
+{
+	struct al_eth_kr_coef_up_data new_lpcoeff;
+
+	/*
+	 * First inspect the status of the link. It may have dropped frame lock
+	 * as the remote did some reconfiguration of its serdes.
+	 * Then we simply have nothing to do and return immediately, as the
+	 * caller will call us continuously until lock comes back.
+	 */
+
+	if (!al_eth_kr_receiver_frame_lock_get(kr_data->adapter))
+		return 0;
+
+	/* check if a new update command was received */
+	al_eth_lp_coeff_up_get(kr_data->adapter, &new_lpcoeff);
+
+	if (al_eth_lp_coeff_up_change(kr_data, &new_lpcoeff))
+		/* got some new coefficient update request. */
+		al_eth_coeff_req_handle(kr_data, &new_lpcoeff);
+
+	return 0;
+}
+
+
+/******************************** transmitter side ***************************/
+int al_eth_kr_lt_transmitter_task_init(struct al_eth_kr_data *kr_data)
+{
+	int i;
+	int rc;
+	uint32_t temp_val;
+
+	for (i = 0 ; i < QARRAY_SIZE ; i++)
+		kr_data->qarray[i] = 0;
+
+	kr_data->qarray_cnt = 0;
+	kr_data->algo_state = TX_INIT;
+	kr_data->curr_coeff = COEFF_TO_MANIPULATE;	/* first coeff to test.
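+							 * Note that with
+							 * COEFF_TO_MANIPULATE ==
+							 * COEFF_TO_MANIPULATE_LAST
+							 * only c(-1) is swept.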
*/ + kr_data->coeff_status_step = C72_CSTATE_NOT_UPDATED; + kr_data->end_steps_cnt = QARRAY_SIZE-1; /* go back to first entry */ + + /* Perform measure eye here to run the rx equalizer + * for the first time to get init values */ + rc = al_serdes_eye_measure_run(kr_data->serdes_obj, + kr_data->grp, + kr_data->lane, + AL_ETH_KR_EYE_MEASURE_TIMEOUT, + &temp_val); + if (rc != 0) { + al_warn("%s: Failed to run Rx equalizer (rc = 0x%x)\n", + __func__, rc); + + return rc; + } + + return 0; +} + + +static al_bool al_eth_kr_lt_all_not_updated( + struct al_eth_kr_status_report_data *report) +{ + if ((report->c_zero == C72_CSTATE_NOT_UPDATED) && + (report->c_minus == C72_CSTATE_NOT_UPDATED) && + (report->c_plus == C72_CSTATE_NOT_UPDATED)) { + return AL_TRUE; + } + + return AL_FALSE; +} + +static void al_eth_kr_lt_coef_set( + struct al_eth_kr_coef_up_data *ldcoeff, + enum al_eth_kr_coef coef, + enum al_eth_kr_cl72_coef_op op) +{ + switch (coef) { + case AL_ETH_KR_COEF_C_MINUS: + ldcoeff->c_minus = op; + break; + case AL_ETH_KR_COEF_C_PLUS: + ldcoeff->c_plus = op; + break; + case AL_ETH_KR_COEF_C_ZERO: + ldcoeff->c_zero = op; + break; + } +} + +static enum al_eth_kr_cl72_coef_op al_eth_kr_lt_coef_report_get( + struct al_eth_kr_status_report_data *report, + enum al_eth_kr_coef coef) +{ + switch (coef) { + case AL_ETH_KR_COEF_C_MINUS: + return report->c_minus; + case AL_ETH_KR_COEF_C_PLUS: + return report->c_plus; + case AL_ETH_KR_COEF_C_ZERO: + return report->c_zero; + } + + return 0; +} + +/** Run the transmitter_task for one cycle. + * + * @return <0 if error occurs + */ +int al_eth_kr_lt_transmitter_task_run(struct al_eth_kr_data *kr_data) +{ + struct al_eth_kr_status_report_data report; + unsigned int coeff_status_cur; + struct al_eth_kr_coef_up_data ldcoeff = { 0, 0, 0, 0, 0 }; + uint32_t val; + int i; + enum al_eth_kr_mac_lt_state nextstate; + int rc = 0; + + /* do nothing if currently there is no frame lock (which may happen */ + /* when remote updates its analogs). 
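+	 * The caller (al_eth_kr_run_lt) re-runs this task roughly every
+	 * 100 uSec until the startup protocol completes, so frame lock is
+	 * re-checked on every round.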
+	 */
+	if (!(al_eth_kr_receiver_frame_lock_get(kr_data->adapter)))
+		return 0;
+
+	al_eth_lp_status_report_get(kr_data->adapter, &report);
+
+	/* extract the current status of the coefficient in use */
+	coeff_status_cur = al_eth_kr_lt_coef_report_get(&report, kr_data->curr_coeff);
+
+	nextstate = kr_data->algo_state; /* by default we stay in the current state */
+
+	switch (kr_data->algo_state) {
+	case TX_INIT:
+		/* waiting for start */
+		if (al_eth_kr_startup_proto_prog_get(kr_data->adapter))
+			/* training is on and frame lock */
+			nextstate = WAIT_BEGIN;
+		break;
+
+	case WAIT_BEGIN:
+		kr_data->qarray_cnt = 0;
+		kr_data->curr_coeff = COEFF_TO_MANIPULATE;
+		kr_data->coeff_status_step = C72_CSTATE_NOT_UPDATED;
+		coeff_status_cur = C72_CSTATE_NOT_UPDATED;
+		kr_data->end_steps_cnt = QARRAY_SIZE-1;
+
+		/* Wait for not_updated for all coefficients from remote */
+		if (al_eth_kr_lt_all_not_updated(&report)) {
+			ldcoeff.preset = AL_TRUE;
+
+			nextstate = DO_PRESET;
+		}
+
+		break;
+	case DO_PRESET:
+		/*
+		 * Send PRESET and wait for updated for all
+		 * coefficients from remote
+		 */
+		if (!al_eth_kr_lt_all_not_updated(&report))
+			nextstate = DO_HOLD;
+		else /* as long as the lp didn't respond to the preset
+		      * we should continue sending it */
+			ldcoeff.preset = AL_TRUE;
+
+		break;
+	case DO_HOLD:
+		/*
+		 * clear the PRESET, issue the HOLD command and wait for the
+		 * hold handshake
+		 */
+		if (al_eth_kr_lt_all_not_updated(&report))
+			nextstate = QMEASURE;
+		break;
+
+	case QMEASURE:
+		/* make a measurement and fill the new value into the array */
+		rc = al_serdes_eye_measure_run(kr_data->serdes_obj,
+					       kr_data->grp,
+					       kr_data->lane,
+					       AL_ETH_KR_EYE_MEASURE_TIMEOUT,
+					       &val);
+		if (rc != 0) {
+			al_warn("%s: Rx eye measurement failed\n", __func__);
+
+			return rc;
+		}
+
+		al_dbg("%s: Rx Measure eye returned 0x%x\n", __func__, val);
+
+		/* put the new value into the array at the top. */
+		for (i = 0 ; i < QARRAY_SIZE-1 ; i++)
+			kr_data->qarray[i] = kr_data->qarray[i+1];
+
+		kr_data->qarray[QARRAY_SIZE-1] = val;
+
+		if (kr_data->qarray_cnt < QARRAY_SIZE)
+			kr_data->qarray_cnt++;
+
+		nextstate = QCHECK;
+		break;
+	case QCHECK:
+		/* check if we reached the best link quality yet. */
+		if (kr_data->qarray_cnt < QARRAY_SIZE) {
+			/* keep going until at least the history is
+			 * filled. check that we can keep going or if the
+			 * coefficient has already reached its minimum.
+			 */
+
+			if (kr_data->coeff_status_step == C72_CSTATE_MIN)
+				nextstate = COEFF_DONE;
+			else {
+				/*
+				 * request a DECREMENT of the
+				 * coefficient under control
+				 */
+				al_eth_kr_lt_coef_set(&ldcoeff,
+						kr_data->curr_coeff,
+						AL_PHY_KR_COEF_UP_DEC);
+
+
+				nextstate = DO_NEXT_TRY;
+			}
+		} else {
+			/*
+			 * check if the current value and the last one are both
+			 * worse than the 2nd last. We take this as the ending
+			 * condition, assuming the minimum was reached two
+			 * tries before, so we will now go back to that point.
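+			 *
+			 * With QARRAY_SIZE == 3 the window inspected here is:
+			 *	qarray[2] - current measurement
+			 *	qarray[1] - previous measurement
+			 *	qarray[0] - the one before that (2nd last)
+			 * as QMEASURE shifts each new value in at the top
+			 * index.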
+			 */
+			if ((kr_data->qarray[0] < kr_data->qarray[1]) &&
+			    (kr_data->qarray[0] < kr_data->qarray[2])) {
+				/*
+				 * request an INCREMENT of the
+				 * coefficient under control
+				 */
+				al_eth_kr_lt_coef_set(&ldcoeff,
+						kr_data->curr_coeff,
+						AL_PHY_KR_COEF_UP_INC);
+
+				/* start going back to the maximum */
+				nextstate = END_STEPS;
+				if (kr_data->end_steps_cnt > 0)
+					kr_data->end_steps_cnt--;
+			} else {
+				if (kr_data->coeff_status_step ==
+				    C72_CSTATE_MIN)
+					nextstate = COEFF_DONE;
+				else {
+					/*
+					 * request a DECREMENT of the
+					 * coefficient under control
+					 */
+					al_eth_kr_lt_coef_set(&ldcoeff,
+							kr_data->curr_coeff,
+							AL_PHY_KR_COEF_UP_DEC);
+
+					nextstate = DO_NEXT_TRY;
+				}
+			}
+		}
+		break;
+	case DO_NEXT_TRY:
+		/* save the status from when we issued the DEC step to the
+		 * remote, before the HOLD is done again. */
+		kr_data->coeff_status_step = coeff_status_cur;
+
+		if (coeff_status_cur != C72_CSTATE_NOT_UPDATED)
+			nextstate = DO_HOLD; /* go to next measurement round */
+		else
+			al_eth_kr_lt_coef_set(&ldcoeff,
+					kr_data->curr_coeff,
+					AL_PHY_KR_COEF_UP_DEC);
+
+		break;
+	/*
+	 * Coefficient iteration completed; go back to the optimum step.
+	 * In this algorithm we assume the value from two steps back was the
+	 * best, hence we need to do two INC runs.
+	 */
+	case END_STEPS:
+		if (coeff_status_cur != C72_CSTATE_NOT_UPDATED)
+			nextstate = END_STEPS_HOLD;
+		else
+			al_eth_kr_lt_coef_set(&ldcoeff,
+					kr_data->curr_coeff,
+					AL_PHY_KR_COEF_UP_INC);
+
+		break;
+	case END_STEPS_HOLD:
+		if (coeff_status_cur == C72_CSTATE_NOT_UPDATED) {
+			if (kr_data->end_steps_cnt != 0) {
+				/*
+				 * request an INCREMENT of the
+				 * coefficient under control
+				 */
+				al_eth_kr_lt_coef_set(&ldcoeff,
+						kr_data->curr_coeff,
+						AL_PHY_KR_COEF_UP_INC);
+
+				/* go a 2nd time - dec the end step count */
+				nextstate = END_STEPS;
+
+				if (kr_data->end_steps_cnt > 0)
+					kr_data->end_steps_cnt--;
+
+			} else {
+				nextstate = COEFF_DONE;
+			}
+		}
+		break;
+	case COEFF_DONE:
+		/*
+		 * now this coefficient is done.
+		 * We can now either choose to finish here,
+		 * or keep going with another coefficient.
+		 */
+		if ((int)kr_data->curr_coeff < COEFF_TO_MANIPULATE_LAST) {
+			int i;
+
+			for (i = 0 ; i < QARRAY_SIZE ; i++)
+				kr_data->qarray[i] = 0;
+
+			kr_data->qarray_cnt = 0;
+			kr_data->end_steps_cnt = QARRAY_SIZE-1;
+			kr_data->coeff_status_step = C72_CSTATE_NOT_UPDATED;
+			kr_data->curr_coeff++;
+
+			al_dbg("[%s]: doing next coefficient: %d ---\n\n",
+			       kr_data->adapter->name, kr_data->curr_coeff);
+
+			nextstate = QMEASURE;
+		} else {
+			nextstate = SET_READY;
+		}
+		break;
+	case SET_READY:
+		/*
+		 * our receiver is ready for data.
+		 * no training will occur any more.
+		 */
+		kr_data->status_report.receiver_ready = AL_TRUE;
+		/*
+		 * in addition to the status we transmit, we also must tell our
+		 * local hardware state-machine that we are done, so the
+		 * training can eventually complete when the remote indicates
+		 * it is ready also. The hardware will then automatically
+		 * give control to the PCS layer completing training.
+		 */
+		al_eth_receiver_ready_set(kr_data->adapter);
+
+		nextstate = TX_DONE;
+		break;
+	case TX_DONE:
+		break; /* nothing else to do */
+	default:
+		nextstate = kr_data->algo_state;
+		break;
+	}
+
+	/*
+	 * The status we want to transmit to the remote.
+	 * Note that the status combines the receiver status of all coefficients
+	 * with the transmitter's rx ready status.
+ */ + if (kr_data->algo_state != nextstate) + al_dbg("[%s] [al_eth_kr_lt_transmit_run] STM changes %s -> %s: " + " Qarray=%d/%d/%d\n", kr_data->adapter->name, + al_eth_kr_mac_sm_name[kr_data->algo_state], + al_eth_kr_mac_sm_name[nextstate], + kr_data->qarray[0], kr_data->qarray[1], kr_data->qarray[2]); + + kr_data->algo_state = nextstate; + + /* + * write fields for transmission into hardware. + * Important: this must be done always, as the receiver may have + * received update commands and wants to return its status. + */ + + al_eth_ld_coeff_up_set(kr_data->adapter, &ldcoeff); + al_eth_ld_status_report_set(kr_data->adapter, &kr_data->status_report); + + return 0; +} + +/*****************************************************************************/ +static int al_eth_kr_run_lt(struct al_eth_kr_data *kr_data) +{ + unsigned int cnt; + int ret = 0; + al_bool page_received = AL_FALSE; + al_bool an_completed = AL_FALSE; + al_bool error = AL_FALSE; + al_bool training_failure = AL_FALSE; + + al_eth_kr_lt_initialize(kr_data->adapter); + + if (al_eth_kr_lt_frame_lock_wait( + kr_data->adapter, AL_ETH_KR_FRAME_LOCK_TIMEOUT) == AL_TRUE) { + /* + * when locked, for the first time initialize the receiver and + * transmitter tasks to prepare it for detecting coefficient + * update requests. + */ + + al_eth_kr_lt_receiver_task_init(kr_data); + ret = al_eth_kr_lt_transmitter_task_init(kr_data); + if (ret != 0) + goto error; + + cnt = 0; + do { + ret = al_eth_kr_lt_receiver_task_run(kr_data); + if (ret != 0) + break; /* stop the link training */ + + ret = al_eth_kr_lt_transmitter_task_run(kr_data); + if (ret != 0) + break; /* stop the link training */ + + cnt++; + al_udelay(100); + + } while ((al_eth_kr_startup_proto_prog_get(kr_data->adapter)) && + (cnt <= AL_ETH_KR_LT_MAX_ROUNDS)); + + training_failure = al_eth_kr_training_status_fail_get(kr_data->adapter); + al_dbg("[%s] training ended after %d rounds, failed = %s\n", + kr_data->adapter->name, cnt, + (training_failure) ? "Yes" : "No"); + if(training_failure || cnt > AL_ETH_KR_LT_MAX_ROUNDS) { + al_warn("[%s] Training Fail: status: %s, timeout: %s\n", + kr_data->adapter->name, + (training_failure) ? "Failed" : "OK", + (cnt > AL_ETH_KR_LT_MAX_ROUNDS) ? "Yes" : "No"); + /* + * note: link is now disabled, + * until training becomes disabled (see below). + */ + + ret = -EIO; + goto error; + } + + } else { + + al_info("[%s] FAILED: did not achieve initial frame lock...\n", + kr_data->adapter->name); + + ret = -EIO; + goto error; + } + + /* + * ensure to stop link training at the end to allow normal PCS + * datapath to operate in case of training failure. 
+	 */
+	al_eth_kr_lt_stop(kr_data->adapter);
+
+	cnt = AL_ETH_KR_LT_DONE_TIMEOUT;
+	while (an_completed == AL_FALSE) {
+		al_eth_kr_an_status_check(kr_data->adapter,
+					  &page_received,
+					  &an_completed,
+					  &error);
+		al_udelay(1);
+		if ((cnt--) == 0) {
+			al_info("%s: wait for an complete timeout!\n", __func__);
+			ret = -ETIMEDOUT;
+			goto error;
+		}
+	}
+
+error:
+	al_eth_kr_an_stop(kr_data->adapter);
+
+	return ret;
+}
+
+/* execute the auto-negotiation process */
+int al_eth_an_lt_execute(struct al_hal_eth_adapter *adapter,
+			 struct al_serdes_obj *serdes_obj,
+			 enum al_serdes_group grp,
+			 enum al_serdes_lane lane,
+			 struct al_eth_an_adv *an_adv,
+			 struct al_eth_an_adv *partner_adv)
+{
+	struct al_eth_kr_data kr_data;
+	int rc;
+	struct al_serdes_adv_rx_params rx_params;
+
+	al_memset(&kr_data, 0, sizeof(struct al_eth_kr_data));
+
+	kr_data.adapter = adapter;
+	kr_data.serdes_obj = serdes_obj;
+	kr_data.grp = grp;
+	kr_data.lane = lane;
+
+	/* the link training process will run rx equalization, so make sure
+	 * the rx parameters are not being overridden */
+	rx_params.override = AL_FALSE;
+	al_serdes_rx_advanced_params_set(kr_data.serdes_obj,
+					 kr_data.grp,
+					 kr_data.lane,
+					 &rx_params);
+
+	rc = al_eth_kr_an_run(&kr_data, an_adv, partner_adv);
+	if (rc) {
+		al_eth_kr_lt_stop(adapter);
+		al_eth_kr_an_stop(adapter);
+		al_dbg("%s: auto-negotiation failed!\n", __func__);
+		return rc;
+	}
+
+	if (partner_adv->technology != AL_ETH_AN_TECH_10GBASE_KR) {
+		al_eth_kr_lt_stop(adapter);
+		al_eth_kr_an_stop(adapter);
+		al_dbg("%s: link partner isn't 10GBASE_KR.\n", __func__);
+		return rc;
+	}
+
+	rc = al_eth_kr_run_lt(&kr_data);
+	if (rc) {
+		al_eth_kr_lt_stop(adapter);
+		al_eth_kr_an_stop(adapter);
+		al_dbg("%s: Link-training failed!\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_init_eth_kr.h b/target/linux/alpine/files/drivers/net/ethernet/al/al_init_eth_kr.h
new file mode 100644
index 00000000000000..71107904cc8100
--- /dev/null
+++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_init_eth_kr.h
@@ -0,0 +1,80 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ +/** + * Ethernet + * @{ + * @file al_init_eth_kr.h + * + * @brief auto-negotiation and link training activation sequence + * + * + */ + +#ifndef __AL_INIT_ETH_KR_H__ +#define __AL_INIT_ETH_KR_H__ + +#include +#include "al_hal_eth_kr.h" + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +/** + * execute Auto-negotiation process + * + * @param adapter pointer to the private structure + * @param serdes_obj pointer to serdes private structure + * @param grp serdes's group + * @param lane serdes's lane + * @param an_adv pointer to the AN Advertisement Registers structure + * when NULL, the registers will not be updated. + * @param partner_adv pointer to the AN Advertisement received from the lp + * + * @return 0 on success. otherwise on failure. + */ +int al_eth_an_lt_execute(struct al_hal_eth_adapter *adapter, + struct al_serdes_obj *serdes_obj, + enum al_serdes_group grp, + enum al_serdes_lane lane, + struct al_eth_an_adv *an_adv, + struct al_eth_an_adv *partner_adv); + +#ifdef __cplusplus +} +#endif +#endif /*__AL_INIT_ETH_KR_H__*/ diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_init_eth_lm.c b/target/linux/alpine/files/drivers/net/ethernet/al/al_init_eth_lm.c new file mode 100644 index 00000000000000..dec98c5705b3e8 --- /dev/null +++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_init_eth_lm.c @@ -0,0 +1,741 @@ +/******************************************************************************* +Copyright (C) 2014 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +/** + * @{ + * @file al_init_eth_lm.c + * + * @brief ethernet link management common utilities + * + */ + +#include +#include +#include "al_init_eth_lm.h" +#include "al_hal_eth.h" +#include "al_init_eth_kr.h" + +/* delay before checking link status with new serdes parameters (uSec) */ +#define AL_ETH_LM_LINK_STATUS_DELAY 1000 +/* delay before checking link status after reconfiguring the retimer (mSec) */ +#define AL_ETH_LM_RETIMER_LINK_STATUS_DELAY 50 + +#define AL_ETH_LM_EQ_ITERATIONS 15 +#define AL_ETH_LM_MAX_DCGAIN 8 + +/* num of link training failures till serdes reset */ +#define AL_ETH_LT_FAILURES_TO_RESET (10) + +#define SFP_I2C_HEADER_10G_IDX 3 +#define SFP_I2C_HEADER_10G_DA_IDX 8 +#define SFP_I2C_HEADER_10G_DA_LEN_IDX 18 +#define SFP_I2C_HEADER_1G_IDX 6 + +#define RETIMER_I2C_BOOST_CH_A_ADDR 0xf +#define RETIMER_I2C_BOOST_CH_B_ADDR 0x16 + +#define SFP_10G_DA_ACTIVE 0x8 +#define SFP_10G_DA_PASSIVE 0x4 + +#define lm_debug(...) \ + do { \ + if (lm_context->debug) \ + al_warn(__VA_ARGS__); \ + else \ + al_dbg(__VA_ARGS__); \ + } while (0) + +static int al_eth_sfp_detect(struct al_eth_lm_context *lm_context, + enum al_eth_lm_link_mode *new_mode) +{ + int rc = 0; + uint8_t sfp_10g; + uint8_t sfp_1g; + uint8_t sfp_cable_tech; + uint8_t sfp_da_len; + + do { + rc = lm_context->i2c_read(lm_context->i2c_context, + lm_context->sfp_bus_id, + lm_context->sfp_i2c_addr, + SFP_I2C_HEADER_10G_IDX, &sfp_10g); + if (rc) + break; + + rc = lm_context->i2c_read(lm_context->i2c_context, + lm_context->sfp_bus_id, + lm_context->sfp_i2c_addr, + SFP_I2C_HEADER_1G_IDX, &sfp_1g); + if (rc) + break; + + rc = lm_context->i2c_read(lm_context->i2c_context, + lm_context->sfp_bus_id, + lm_context->sfp_i2c_addr, + SFP_I2C_HEADER_10G_DA_IDX, + &sfp_cable_tech); + if (rc) + break; + + rc = lm_context->i2c_read(lm_context->i2c_context, + lm_context->sfp_bus_id, + lm_context->sfp_i2c_addr, + SFP_I2C_HEADER_10G_DA_LEN_IDX, + &sfp_da_len); + } while (0); + + if (rc) { + if (rc == -ETIMEDOUT) { + /* ETIMEDOUT is returned when no SFP is connected */ + lm_debug("%s: SFP Disconnected\n", __func__); + *new_mode = AL_ETH_LM_MODE_DISCONNECTED; + } else { + return rc; + } + } else if (sfp_cable_tech & SFP_10G_DA_PASSIVE) { + lm_debug("%s: 10G passive DAC (%d M) detected\n", __func__, sfp_da_len); + *new_mode = AL_ETH_LM_MODE_10G_DA; + lm_context->da_len = sfp_da_len; + } else if (sfp_cable_tech & SFP_10G_DA_ACTIVE) { + lm_debug("%s: 10G DAC active (%d M) detected\n", __func__, sfp_da_len); + *new_mode = AL_ETH_LM_MODE_10G_DA; + /* for active direct attached need to use len 0 in the retimer configuration */ + lm_context->da_len = 0; + } else if (sfp_10g != 0) { + lm_debug("%s: 10 SFP detected\n", __func__); + *new_mode = AL_ETH_LM_MODE_10G_OPTIC; + } else if (sfp_1g != 0) { + lm_debug("%s: 1G SFP detected\n", __func__); + *new_mode = AL_ETH_LM_MODE_1G; + } else { + al_warn("%s: 
unknown SFP inserted. eeprom content: 10G compliance 0x%x," + " 1G compliance 0x%x, sfp+cable 0x%x. default to %s\n", + __func__, sfp_10g, sfp_1g, sfp_cable_tech, + al_eth_lm_mode_convert_to_str(lm_context->default_mode)); + *new_mode = lm_context->default_mode; + lm_context->da_len = lm_context->default_dac_len; + } + + lm_context->mode = *new_mode; + + return 0; +} + +static struct al_serdes_adv_tx_params da_tx_params = { + .override = AL_TRUE, + .amp = 0x1, + .total_driver_units = 0x13, + .c_plus_1 = 0x2, + .c_plus_2 = 0, + .c_minus_1 = 0x2, + .slew_rate = 0, +}; + +static struct al_serdes_adv_rx_params da_rx_params = { + .override = AL_TRUE, + .dcgain = 0x4, + .dfe_3db_freq = 0x4, + .dfe_gain = 0x3, + .dfe_first_tap_ctrl = 0x5, + .dfe_secound_tap_ctrl = 0x1, + .dfe_third_tap_ctrl = 0x8, + .dfe_fourth_tap_ctrl = 0x1, + .low_freq_agc_gain = 0x7, + .precal_code_sel = 0, + .high_freq_agc_boost = 0x1d, +}; + +static struct al_serdes_adv_tx_params optic_tx_params = { + .override = AL_TRUE, + .amp = 0x1, + .total_driver_units = 0x13, + .c_plus_1 = 0x2, + .c_plus_2 = 0, + .c_minus_1 = 0, + .slew_rate = 0, +}; + +static struct al_serdes_adv_rx_params optic_rx_params = { + .override = AL_TRUE, + .dcgain = 0x0, + .dfe_3db_freq = 0x7, + .dfe_gain = 0x0, + .dfe_first_tap_ctrl = 0x0, + .dfe_secound_tap_ctrl = 0x8, + .dfe_third_tap_ctrl = 0x0, + .dfe_fourth_tap_ctrl = 0x8, + .low_freq_agc_gain = 0x7, + .precal_code_sel = 0, + .high_freq_agc_boost = 0x4, +}; + + +void al_eth_serdes_static_tx_params_set(struct al_eth_lm_context *lm_context) +{ + if (lm_context->tx_param_dirty == 0) + return; + + if (lm_context->serdes_tx_params_valid) { + lm_context->tx_param_dirty = 0; + + lm_context->tx_params_override.override = AL_TRUE; + + al_serdes_tx_advanced_params_set(lm_context->serdes_obj, + lm_context->grp, + lm_context->lane, + &lm_context->tx_params_override); + + + } else if (lm_context->static_values) { + lm_context->tx_param_dirty = 0; + + if ((!lm_context->retimer_exist) && (lm_context->mode == AL_ETH_LM_MODE_10G_DA)) + al_serdes_tx_advanced_params_set(lm_context->serdes_obj, + lm_context->grp, + lm_context->lane, + &da_tx_params); + else + al_serdes_tx_advanced_params_set(lm_context->serdes_obj, + lm_context->grp, + lm_context->lane, + &optic_tx_params); + } +} + +void al_eth_serdes_static_rx_params_set(struct al_eth_lm_context *lm_context) +{ + if (lm_context->rx_param_dirty == 0) + return; + + if (lm_context->serdes_rx_params_valid) { + lm_context->rx_param_dirty = 0; + + lm_context->rx_params_override.override = AL_TRUE; + + al_serdes_rx_advanced_params_set(lm_context->serdes_obj, + lm_context->grp, + lm_context->lane, + &lm_context->rx_params_override); + + + } else if (lm_context->static_values) { + lm_context->rx_param_dirty = 0; + + if ((!lm_context->retimer_exist) && (lm_context->mode == AL_ETH_LM_MODE_10G_DA)) + al_serdes_rx_advanced_params_set(lm_context->serdes_obj, + lm_context->grp, + lm_context->lane, + &da_rx_params); + else + al_serdes_rx_advanced_params_set(lm_context->serdes_obj, + lm_context->grp, + lm_context->lane, + &optic_rx_params); + } +} + +int al_eth_rx_equal_run(struct al_eth_lm_context *lm_context) +{ + struct al_serdes_adv_rx_params rx_params; + int dcgain; + int best_dcgain = -1; + int i; + uint8_t reg; + int best_score = -1; + int test_score = -1; + + rx_params.override = AL_FALSE; + al_serdes_rx_advanced_params_set(lm_context->serdes_obj, + lm_context->grp, + lm_context->lane, + &rx_params); + + lm_debug("score | dcgain | dfe3db | dfegain | tap1 | tap2 | tap3 | tap4 | 
low freq | high freq\n"); + + for (dcgain = 0; dcgain < AL_ETH_LM_MAX_DCGAIN; dcgain++) { + al_serdes_reg_read(lm_context->serdes_obj, + lm_context->grp, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_REG_NUM, + ®); + + AL_REG_FIELD_SET(reg, + SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_MASK, + SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_SHIFT, + dcgain); + + al_serdes_reg_write(lm_context->serdes_obj, + lm_context->grp, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_REG_NUM, + reg); + + test_score = al_serdes_rx_equalization(lm_context->serdes_obj, + lm_context->grp, + lm_context->lane); + + if (test_score < 0) { + al_warn("serdes rx equalization failed on error\n"); + return test_score; + } + + if (test_score > best_score) { + best_score = test_score; + best_dcgain = dcgain; + } + + al_serdes_rx_advanced_params_get(lm_context->serdes_obj, + lm_context->grp, + lm_context->lane, + &rx_params); + + lm_debug("%6d|%8x|%8x|%9x|%6x|%6x|%6x|%6x|%10x|%10x|\n", + test_score, rx_params.dcgain, rx_params.dfe_3db_freq, + rx_params.dfe_gain, rx_params.dfe_first_tap_ctrl, + rx_params.dfe_secound_tap_ctrl, rx_params.dfe_third_tap_ctrl, + rx_params.dfe_fourth_tap_ctrl, rx_params.low_freq_agc_gain, + rx_params.high_freq_agc_boost); + + } + + al_serdes_reg_read(lm_context->serdes_obj, + lm_context->grp, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_REG_NUM, + ®); + + AL_REG_FIELD_SET(reg, + SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_MASK, + SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_SHIFT, + best_dcgain); + + al_serdes_reg_write(lm_context->serdes_obj, + lm_context->grp, + AL_SRDS_REG_PAGE_4_COMMON, + AL_SRDS_REG_TYPE_PMA, + SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_REG_NUM, + reg); + + best_score = -1; + for(i = 0; i < AL_ETH_LM_EQ_ITERATIONS; i++) { + test_score = al_serdes_rx_equalization(lm_context->serdes_obj, + lm_context->grp, + lm_context->lane); + + if (test_score < 0) { + al_warn("serdes rx equalization failed on error\n"); + return test_score; + } + + if (test_score > best_score) { + best_score = test_score; + al_serdes_rx_advanced_params_get(lm_context->serdes_obj, + lm_context->grp, + lm_context->lane, + &rx_params); + } + } + + rx_params.precal_code_sel = 0; + rx_params.override = AL_TRUE; + al_serdes_rx_advanced_params_set(lm_context->serdes_obj, + lm_context->grp, + lm_context->lane, + &rx_params); + + lm_debug("-------------------- best dcgain %d ------------------------------------\n", best_dcgain); + lm_debug("%6d|%8x|%8x|%9x|%6x|%6x|%6x|%6x|%10x|%10x|\n", + best_score, rx_params.dcgain, rx_params.dfe_3db_freq, + rx_params.dfe_gain, rx_params.dfe_first_tap_ctrl, + rx_params.dfe_secound_tap_ctrl, rx_params.dfe_third_tap_ctrl, + rx_params.dfe_fourth_tap_ctrl, rx_params.low_freq_agc_gain, + rx_params.high_freq_agc_boost); + + return 0; + +} + +int al_eth_lm_retimer_config(struct al_eth_lm_context *lm_context) +{ + int rc = 0; + uint8_t boost = 0; + + if ((lm_context->mode != AL_ETH_LM_MODE_10G_DA) || (lm_context->da_len == 0)) + boost = 0; /* ~5dB Loss */ + else if (lm_context->da_len <= 1) + boost = 0x1; /* ~5dB Loss */ + else if (lm_context->da_len <= 2) + boost = 0x2; /* ~9dB Loss */ + else if (lm_context->da_len <= 3) + boost = 0x3; /* ~12dB Loss */ + else if (lm_context->da_len <= 5) + boost = 0x7; /* ~16dB Loss */ + else + boost = 0xb; /* ~19dB Loss */ + + lm_debug("config retimer boost in channel %d to 0x%x\n", + lm_context->retimer_channel, boost); + + if (lm_context->retimer_channel == 
AL_ETH_RETIMER_CHANNEL_A)
+		rc = lm_context->i2c_write(lm_context->i2c_context,
+					   lm_context->retimer_bus_id,
+					   lm_context->retimer_i2c_addr,
+					   RETIMER_I2C_BOOST_CH_A_ADDR, boost);
+	else
+		rc = lm_context->i2c_write(lm_context->i2c_context,
+					   lm_context->retimer_bus_id,
+					   lm_context->retimer_i2c_addr,
+					   RETIMER_I2C_BOOST_CH_B_ADDR, boost);
+
+	if (rc) {
+		al_err("%s: Error occurred (%d) while writing retimer configuration (bus-id %x i2c-addr %x)\n",
+		       __func__, rc, lm_context->retimer_bus_id, lm_context->retimer_i2c_addr);
+		return rc;
+	}
+
+
+	return 0;
+}
+
+/*****************************************************************************/
+/***************************** API functions *********************************/
+/*****************************************************************************/
+int al_eth_lm_init(struct al_eth_lm_context *lm_context,
+		   struct al_eth_lm_init_params *params)
+{
+	lm_context->adapter = params->adapter;
+	lm_context->serdes_obj = params->serdes_obj;
+	lm_context->grp = params->grp;
+	lm_context->lane = params->lane;
+	lm_context->sfp_detection = params->sfp_detection;
+	lm_context->sfp_bus_id = params->sfp_bus_id;
+	lm_context->sfp_i2c_addr = params->sfp_i2c_addr;
+
+	lm_context->retimer_exist = params->retimer_exist;
+	lm_context->retimer_bus_id = params->retimer_bus_id;
+	lm_context->retimer_i2c_addr = params->retimer_i2c_addr;
+	lm_context->retimer_channel = params->retimer_channel;
+
+	lm_context->default_mode = params->default_mode;
+	lm_context->default_dac_len = params->default_dac_len;
+	lm_context->link_training = params->link_training;
+	lm_context->rx_equal = params->rx_equal;
+	lm_context->static_values = params->static_values;
+	lm_context->i2c_read = params->i2c_read;
+	lm_context->i2c_write = params->i2c_write;
+	lm_context->i2c_context = params->i2c_context;
+	lm_context->get_random_byte = params->get_random_byte;
+
+	/* i2c_read must be provided if sfp_detection is true */
+	al_assert((lm_context->sfp_detection == AL_FALSE) ||
+		  (lm_context->i2c_read != NULL));
+
+	al_assert((lm_context->retimer_exist == AL_FALSE) ||
+		  (lm_context->i2c_write != NULL));
+
+	lm_context->local_adv.selector_field = 1;
+	lm_context->local_adv.capability = 0;
+	lm_context->local_adv.remote_fault = 0;
+	lm_context->local_adv.acknowledge = 0;
+	lm_context->local_adv.next_page = 0;
+	lm_context->local_adv.technology = AL_ETH_AN_TECH_10GBASE_KR;
+	lm_context->local_adv.fec_capability = params->kr_fec_enable;
+
+	lm_context->mode = AL_ETH_LM_MODE_DISCONNECTED;
+	lm_context->serdes_tx_params_valid = AL_FALSE;
+	lm_context->serdes_rx_params_valid = AL_FALSE;
+
+	lm_context->last_link_status.link_up = AL_FALSE;
+
+	lm_context->rx_param_dirty = 1;
+	lm_context->tx_param_dirty = 1;
+
+	return 0;
+}
+
+
+int al_eth_lm_link_detection(struct al_eth_lm_context *lm_context,
+			     al_bool *link_fault,
+			     enum al_eth_lm_link_mode *old_mode,
+			     enum al_eth_lm_link_mode *new_mode)
+{
+	int err;
+	struct al_eth_link_status status;
+
+	al_assert(lm_context != NULL);
+	al_assert(old_mode != NULL);
+	al_assert(new_mode != NULL);
+
+	al_eth_link_status_get(lm_context->adapter, &status);
+
+	*old_mode = lm_context->mode;
+	*new_mode = lm_context->mode;
+
+
+	if (status.link_up == AL_FALSE)
+		al_eth_led_set(lm_context->adapter, AL_FALSE);
+
+	if (link_fault) {
+		*link_fault = AL_FALSE;
+
+		/* link is still up - no link fault to handle */
+		if ((lm_context->last_link_status.link_up == AL_TRUE) &&
+		    (status.link_up == AL_TRUE))
+			return 0;
+
+		lm_context->last_link_status = status;
+		*link_fault = AL_TRUE;
+	}
+
+	
if (lm_context->sfp_detection) { + err = al_eth_sfp_detect(lm_context, new_mode); + if (err) { + al_err("sfp_detection failed!\n"); + return err; + } + + lm_context->mode = *new_mode; + } else { + lm_context->mode = lm_context->default_mode; + *new_mode = lm_context->mode; + } + + if (*old_mode != *new_mode) { + al_info("%s: New SFP mode detected %s -> %s\n", + __func__, al_eth_lm_mode_convert_to_str(*old_mode), + al_eth_lm_mode_convert_to_str(*new_mode)); + + lm_context->rx_param_dirty = 1; + lm_context->tx_param_dirty = 1; + } + + return 0; +} + + +int al_eth_lm_link_establish(struct al_eth_lm_context *lm_context, + al_bool *link_up) +{ + al_bool signal_detected; + int ret = 0; + + signal_detected = al_serdes_signal_is_detected( + lm_context->serdes_obj, + lm_context->grp, + lm_context->lane); + + if (signal_detected == AL_FALSE) { + /* if no signal detected there is nothing to do */ + *link_up = AL_FALSE; + return 0; + } + + if (lm_context->retimer_exist) { + al_eth_serdes_static_rx_params_set(lm_context); + al_eth_serdes_static_tx_params_set(lm_context); + al_eth_lm_retimer_config(lm_context); + al_msleep(AL_ETH_LM_RETIMER_LINK_STATUS_DELAY); + + al_eth_link_status_get(lm_context->adapter, &lm_context->last_link_status); + + if (lm_context->last_link_status.link_up == AL_TRUE) { + al_info("%s: link up with retimer\n", __func__); + goto link_is_up; + } + + *link_up = AL_FALSE; + return -1; + } + + if ((lm_context->mode == AL_ETH_LM_MODE_10G_DA) && (lm_context->link_training)) { + lm_context->local_adv.transmitted_nonce = lm_context->get_random_byte(); + lm_context->local_adv.transmitted_nonce &= 0x1f; + + ret = al_eth_an_lt_execute(lm_context->adapter, + lm_context->serdes_obj, + lm_context->grp, + lm_context->lane, + &lm_context->local_adv, + &lm_context->partner_adv); + + lm_context->rx_param_dirty = 1; + lm_context->tx_param_dirty = 1; + + if (ret == 0) { + al_info("%s: link training finished successfully\n", __func__); + lm_context->last_link_status.link_up = AL_TRUE; + lm_context->link_training_failures = 0; + goto link_is_up; + } + + lm_context->link_training_failures++; + if (lm_context->link_training_failures > AL_ETH_LT_FAILURES_TO_RESET) { + lm_debug("%s: failed to establish LT %d times. 
reset serdes\n",
+				 __func__, AL_ETH_LT_FAILURES_TO_RESET);
+
+			al_serdes_pma_hard_reset_lane(lm_context->serdes_obj,
+						      lm_context->grp,
+						      lm_context->lane,
+						      AL_TRUE);
+			al_serdes_pma_hard_reset_lane(lm_context->serdes_obj,
+						      lm_context->grp,
+						      lm_context->lane,
+						      AL_FALSE);
+			lm_context->link_training_failures = 0;
+		}
+	}
+
+	al_eth_serdes_static_tx_params_set(lm_context);
+
+	if ((lm_context->mode == AL_ETH_LM_MODE_10G_DA) && (lm_context->rx_equal)) {
+		ret = al_eth_rx_equal_run(lm_context);
+
+		if (ret == 0) {
+			al_udelay(AL_ETH_LM_LINK_STATUS_DELAY);
+			al_eth_link_status_get(lm_context->adapter, &lm_context->last_link_status);
+			if (lm_context->last_link_status.link_up == AL_TRUE) {
+				al_info("%s: rx_equal finished successfully\n", __func__);
+				goto link_is_up;
+
+			}
+		}
+	}
+
+	al_eth_serdes_static_rx_params_set(lm_context);
+
+	al_udelay(AL_ETH_LM_LINK_STATUS_DELAY);
+
+	al_eth_link_status_get(lm_context->adapter, &lm_context->last_link_status);
+
+	if (lm_context->last_link_status.link_up == AL_TRUE) {
+		al_info("%s: link up with static parameters\n", __func__);
+		goto link_is_up;
+	}
+
+	*link_up = AL_FALSE;
+	return -1;
+
+ link_is_up:
+	al_eth_led_set(lm_context->adapter, AL_TRUE);
+	*link_up = AL_TRUE;
+	return 0;
+
+}
+
+int al_eth_lm_static_parameters_override(struct al_eth_lm_context *lm_context,
+					 struct al_serdes_adv_tx_params *tx_params,
+					 struct al_serdes_adv_rx_params *rx_params)
+{
+	if (tx_params) {
+		lm_context->tx_params_override = *tx_params;
+		lm_context->tx_param_dirty = 1;
+		lm_context->serdes_tx_params_valid = AL_TRUE;
+	}
+
+	if (rx_params) {
+		lm_context->rx_params_override = *rx_params;
+		lm_context->rx_param_dirty = 1;
+		lm_context->serdes_rx_params_valid = AL_TRUE;
+	}
+
+	return 0;
+}
+
+int al_eth_lm_static_parameters_override_disable(
+		struct al_eth_lm_context *lm_context,
+		al_bool tx_params,
+		al_bool rx_params)
+{
+	if (tx_params)
+		lm_context->serdes_tx_params_valid = AL_FALSE;
+	if (rx_params)
+		lm_context->serdes_rx_params_valid = AL_FALSE;
+
+	return 0;
+}
+
+int al_eth_lm_static_parameters_get(struct al_eth_lm_context *lm_context,
+				    struct al_serdes_adv_tx_params *tx_params,
+				    struct al_serdes_adv_rx_params *rx_params)
+{
+	if (tx_params) {
+		if (lm_context->serdes_tx_params_valid)
+			*tx_params = lm_context->tx_params_override;
+		else
+			al_serdes_tx_advanced_params_get(lm_context->serdes_obj,
+							 lm_context->grp,
+							 lm_context->lane,
+							 tx_params);
+	}
+
+	if (rx_params) {
+		if (lm_context->serdes_rx_params_valid)
+			*rx_params = lm_context->rx_params_override;
+		else
+			al_serdes_rx_advanced_params_get(lm_context->serdes_obj,
+							 lm_context->grp,
+							 lm_context->lane,
+							 rx_params);
+	}
+
+	return 0;
+}
+
+const char *al_eth_lm_mode_convert_to_str(enum al_eth_lm_link_mode val)
+{
+	switch (val) {
+	case AL_ETH_LM_MODE_DISCONNECTED:
+		return "AL_ETH_LM_MODE_DISCONNECTED";
+	case AL_ETH_LM_MODE_10G_OPTIC:
+		return "AL_ETH_LM_MODE_10G_OPTIC";
+	case AL_ETH_LM_MODE_10G_DA:
+		return "AL_ETH_LM_MODE_10G_DA";
+	case AL_ETH_LM_MODE_1G:
+		return "AL_ETH_LM_MODE_1G";
+	}
+
+	return "N/A";
+}
+
+void al_eth_lm_debug_mode_set(struct al_eth_lm_context *lm_context,
+			      al_bool enable)
+{
+	lm_context->debug = enable;
+}
diff --git a/target/linux/alpine/files/drivers/net/ethernet/al/al_init_eth_lm.h b/target/linux/alpine/files/drivers/net/ethernet/al/al_init_eth_lm.h
new file mode 100644
index 00000000000000..399ebd7d93ae1a
--- /dev/null
+++ b/target/linux/alpine/files/drivers/net/ethernet/al/al_init_eth_lm.h
@@ -0,0 +1,324 @@
+/*******************************************************************************
+Copyright (C) 2014 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+/**
+ * Ethernet
+ * @{
+ * @file al_init_eth_lm.h
+ *
+ * @brief ethernet link management common utilities
+ *
+ * Common operation example:
+ * @code
+ * int main()
+ * {
+ *	struct al_eth_lm_context lm_context;
+ *	struct al_eth_lm_init_params lm_params;
+ *	enum al_eth_lm_link_mode old_mode;
+ *	enum al_eth_lm_link_mode new_mode;
+ *	al_bool fault;
+ *	al_bool link_up;
+ *	int rc = 0;
+ *
+ *	lm_params.adapter = hal_adapter;
+ *	lm_params.serdes_obj = serdes;
+ *	lm_params.grp = grp;
+ *	lm_params.lane = lane;
+ *	lm_params.sfp_detection = true;
+ *	lm_params.link_training = true;
+ *	lm_params.rx_equal = true;
+ *	lm_params.static_values = true;
+ *	lm_params.kr_fec_enable = false;
+ *	lm_params.i2c_read = &my_eeprom_read;
+ *	lm_params.i2c_context = context;
+ *	lm_params.get_random_byte = &my_rand_byte;
+ *	lm_params.default_mode = AL_ETH_LM_MODE_10G_DA;
+ *
+ *	al_eth_lm_init(&lm_context, &lm_params);
+ *
+ *	rc = al_eth_lm_link_detection(&lm_context, &fault, &old_mode, &new_mode);
+ *	if (fault == false)
+ *		return 0; // in this case the link is still up
+ *
+ *	if (rc) {
+ *		printf("link detection failed on error\n");
+ *		return rc;
+ *	}
+ *
+ *	if (old_mode != new_mode) {
+ *		// perform serdes configuration if needed
+ *
+ *		// mac stop / start / config if needed
+ *	}
+ *
+ *	spin_lock(lock);
+ *	rc = al_eth_lm_link_establish(&lm_context, &link_up);
+ *	spin_unlock(lock);
+ *	if (rc) {
+ *		printf("establish link failed\n");
+ *		return rc;
+ *	}
+ *
+ *	if (link_up)
+ *		printf("Link established successfully\n");
+ *	else
+ *		printf("No signal found. probably the link partner is disconnected\n");
+ * }
+ * @endcode
+ *
+ */
+
+#ifndef __AL_INIT_ETH_LM_H__
+#define __AL_INIT_ETH_LM_H__
+
+#include
+#include "al_hal_eth.h"
+#include "al_init_eth_kr.h"
+
+
+enum al_eth_lm_link_mode {
+	AL_ETH_LM_MODE_DISCONNECTED,
+	AL_ETH_LM_MODE_10G_OPTIC,
+	AL_ETH_LM_MODE_10G_DA,
+	AL_ETH_LM_MODE_1G,
+};
+
+struct al_eth_lm_context {
+	struct al_hal_eth_adapter *adapter;
+	struct al_serdes_obj *serdes_obj;
+	enum al_serdes_group grp;
+	enum al_serdes_lane lane;
+
+	struct al_eth_link_status last_link_status;
+	uint32_t link_training_failures;
+
+	al_bool tx_param_dirty;
+	al_bool serdes_tx_params_valid;
+	struct al_serdes_adv_tx_params tx_params_override;
+	al_bool rx_param_dirty;
+	al_bool serdes_rx_params_valid;
+	struct al_serdes_adv_rx_params rx_params_override;
+
+	struct al_eth_an_adv local_adv;
+	struct al_eth_an_adv partner_adv;
+
+	enum al_eth_lm_link_mode mode;
+	uint8_t da_len;
+	al_bool debug;
+
+	/* configurations */
+	al_bool sfp_detection;
+	uint8_t sfp_bus_id;
+	uint8_t sfp_i2c_addr;
+
+	enum al_eth_lm_link_mode default_mode;
+	uint8_t default_dac_len;
+	al_bool link_training;
+	al_bool rx_equal;
+	al_bool static_values;
+
+	al_bool retimer_exist;
+	uint8_t retimer_bus_id;
+	uint8_t retimer_i2c_addr;
+	enum al_eth_retimer_channel retimer_channel;
+
+	/* services */
+	int (*i2c_read)(void *handle, uint8_t bus_id, uint8_t i2c_addr,
+			uint8_t reg_addr, uint8_t *val);
+	int (*i2c_write)(void *handle, uint8_t bus_id, uint8_t i2c_addr,
+			 uint8_t reg_addr, uint8_t val);
+	void *i2c_context;
+	uint8_t (*get_random_byte)(void);
+};
+
+struct al_eth_lm_init_params {
+	/* pointer to HAL context */
+	struct al_hal_eth_adapter *adapter;
+	/* pointer to serdes object */
+	struct al_serdes_obj *serdes_obj;
+	/* serdes group for this port */
+	enum al_serdes_group grp;
+	/* serdes lane for this port */
+	enum al_serdes_lane lane;
+
+	/*
+	 * set to true to perform sfp detection if the link is down.
+	 * when set to true, i2c_read below should NOT be NULL.
+	 */
+	al_bool sfp_detection;
+	/* i2c bus id of the SFP for this port */
+	uint8_t sfp_bus_id;
+	/* i2c addr of the SFP for this port */
+	uint8_t sfp_i2c_addr;
+	/*
+	 * default mode and dac length will be used in case sfp_detection
+	 * is not set or in case the detection failed.
+	 */
+	enum al_eth_lm_link_mode default_mode;
+	uint8_t default_dac_len;
+
+	/* the i2c bus id and addr of the retimer in case it exists */
+	uint8_t retimer_bus_id;
+	uint8_t retimer_i2c_addr;
+	/* retimer channel connected to this port */
+	enum al_eth_retimer_channel retimer_channel;
+
+	/*
+	 * the following parameters control what mechanisms to run
+	 * on link_establish with the following steps:
+	 * - if retimer_exist is set, the retimer will be configured based on DA len.
+	 * - if link_training is set and a DA is detected, run link training; return 0 on success.
+	 * - if rx_equal is set, serdes equalization will be run to configure the rx parameters.
+	 * - if static_values is set, tx and rx values will be set based on static values.
+	 */
+	al_bool retimer_exist;
+	al_bool link_training;
+	al_bool rx_equal;
+	al_bool static_values;
+
+	/* enable / disable fec capabilities in AN */
+	al_bool kr_fec_enable;
+
+	/*
+	 * pointer to a function that reads 1 byte from the eeprom.
+	 * in case no eeprom is connected it should return -ETIMEDOUT.
+	 */
+	int (*i2c_read)(void *handle, uint8_t bus_id, uint8_t i2c_addr,
+			uint8_t reg_addr, uint8_t *val);
+	int (*i2c_write)(void *handle, uint8_t bus_id, uint8_t i2c_addr,
+			 uint8_t reg_addr, uint8_t val);
+	void *i2c_context;
+	/* pointer to a function that returns 1 random byte */
+	uint8_t (*get_random_byte)(void);
+};
+
+/**
+ * initialize link management context and set configuration
+ *
+ * @param lm_context pointer to link management context
+ * @param params parameters passed from upper layer
+ *
+ * @return 0 in case of success. otherwise on failure.
+ */
+int al_eth_lm_init(struct al_eth_lm_context *lm_context,
+		   struct al_eth_lm_init_params *params);
+
+/**
+ * perform link status check. in case link is down perform sfp detection
+ *
+ * @param lm_context pointer to link management context
+ * @param link_fault indicate if the link is down
+ * @param old_mode the last working mode
+ * @param new_mode the new mode detected in this call
+ *
+ * @return 0 in case of success. otherwise on failure.
+ */
+int al_eth_lm_link_detection(struct al_eth_lm_context *lm_context,
+			     al_bool *link_fault,
+			     enum al_eth_lm_link_mode *old_mode,
+			     enum al_eth_lm_link_mode *new_mode);
+
+/**
+ * run LT, rx equalization and static values override according to configuration
+ * This function MUST be called inside a lock as it uses common serdes registers
+ *
+ * @param lm_context pointer to link management context
+ * @param link_up set to true in case link is established successfully
+ *
+ * @return < 0 in case the link failed to be established
+ */
+int al_eth_lm_link_establish(struct al_eth_lm_context *lm_context,
+			     al_bool *link_up);
+
+/**
+ * override the default static parameters
+ *
+ * @param lm_context pointer to link management context
+ * @param tx_params pointer to new tx params
+ * @param rx_params pointer to new rx params
+ *
+ * @return 0 in case of success. otherwise on failure.
+ **/
+int al_eth_lm_static_parameters_override(struct al_eth_lm_context *lm_context,
+					 struct al_serdes_adv_tx_params *tx_params,
+					 struct al_serdes_adv_rx_params *rx_params);
+
+/**
+ * disable serdes parameters override
+ *
+ * @param lm_context pointer to link management context
+ * @param tx_params set to true to disable override of tx params
+ * @param rx_params set to true to disable override of rx params
+ *
+ * @return 0 in case of success. otherwise on failure.
+ **/
+int al_eth_lm_static_parameters_override_disable(
+		struct al_eth_lm_context *lm_context,
+		al_bool tx_params,
+		al_bool rx_params);
+
+/**
+ * get the static parameters that are being used
+ * if the parameters were overridden - return the override values
+ * else return the current values of the parameters
+ *
+ * @param lm_context pointer to link management context
+ * @param tx_params pointer to new tx params
+ * @param rx_params pointer to new rx params
+ *
+ * @return 0 in case of success. otherwise on failure.
+ */ +int al_eth_lm_static_parameters_get(struct al_eth_lm_context *lm_context, + struct al_serdes_adv_tx_params *tx_params, + struct al_serdes_adv_rx_params *rx_params); + +/** + * convert link management mode to string + * + * @param val link management mode + * + * @return string of the mode + */ +const char *al_eth_lm_mode_convert_to_str(enum al_eth_lm_link_mode val); + +/** + * print all debug messages + * + * @param lm_context pointer to link management context + * @param enable set to true to enable debug mode + */ +void al_eth_lm_debug_mode_set(struct al_eth_lm_context *lm_context, + al_bool enable); + +#endif diff --git a/target/linux/alpine/files/drivers/net/mdio/mdio-al-gpio.c b/target/linux/alpine/files/drivers/net/mdio/mdio-al-gpio.c new file mode 100644 index 00000000000000..20c4f2236f1b23 --- /dev/null +++ b/target/linux/alpine/files/drivers/net/mdio/mdio-al-gpio.c @@ -0,0 +1,308 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_DESCRIPTION("MDIO by GPIO"); +MODULE_AUTHOR("Winder Sung "); +MODULE_LICENSE("GPL"); + +#define GPIO_MDC_PIN 0 +#define GPIO_MDIO_PIN 1 + +#define ISIS_CHIP_ID 0x18 +#define ISIS_CHIP_REG 0 + +#define MARVELL_CHIP_ID 0x1302 + +static void al_mdio_gpio_mdc_pulse(void) /*Clock line */ +{ + /* 1 Mhz */ + udelay(1); + gpio_set_value(GPIO_MDC_PIN, 0); + udelay(1); + gpio_set_value(GPIO_MDC_PIN, 1); + udelay(1); +} + +static void al_mdio_gpio_idle(void) +{ + gpio_direction_output(GPIO_MDIO_PIN, 1); + al_mdio_gpio_mdc_pulse(); +} + +static void al_mdio_gpio_preamble(void) +{ + char i; + + gpio_direction_output(GPIO_MDIO_PIN, 1); + /* Transmit al_mdio_gpio_preamble 11....11(32 bits) */ + for(i=0 ; i < 32 ; i++) + al_mdio_gpio_mdc_pulse(); + /* Transmit Start of Frame '01' */ + gpio_set_value(GPIO_MDIO_PIN, 0); + al_mdio_gpio_mdc_pulse(); + gpio_set_value(GPIO_MDIO_PIN, 1); + al_mdio_gpio_mdc_pulse(); +} + +static unsigned int al_mdio_gpio_phy_reg_read(char phy_addr,char phy_reg) +{ + char i; + u16 phy_val; + + al_mdio_gpio_preamble(); + /*OP Code 10*/ + gpio_direction_output(GPIO_MDIO_PIN, 1); + al_mdio_gpio_mdc_pulse(); + gpio_set_value(GPIO_MDIO_PIN, 0); + al_mdio_gpio_mdc_pulse(); + /*5 bits PHY addr*/ + for(i = 0; i < 5; i++) + { + if(phy_addr & 0x10) + { + gpio_set_value(GPIO_MDIO_PIN, 1); + } + else + { + gpio_set_value(GPIO_MDIO_PIN, 0); + } + al_mdio_gpio_mdc_pulse(); + phy_addr <<= 1; + } + /*5 bits PHY reg*/ + for(i = 0; i < 5; i++) + { + if(phy_reg & 0x10) + { + gpio_set_value(GPIO_MDIO_PIN, 1); + } + else + { + gpio_set_value(GPIO_MDIO_PIN, 0); + } + al_mdio_gpio_mdc_pulse(); + phy_reg <<= 1; + } + /*Turnaround Z*/ + gpio_set_value(GPIO_MDIO_PIN, 1); + al_mdio_gpio_mdc_pulse(); + gpio_direction_input(GPIO_MDIO_PIN); + /*Read 16 bits Data*/ + phy_val = 
0x0000; + for ( i = 0; i < 16; i++) + { + al_mdio_gpio_mdc_pulse(); + if (1 == gpio_get_value(GPIO_MDIO_PIN)) + phy_val |= 0x0001; + if (i < 15) + phy_val <<= 1; + } + al_mdio_gpio_idle(); + gpio_direction_output(GPIO_MDIO_PIN, 1); + + return phy_val; +} + +static int al_mdio_gpio_phy_reg_write(char phy_addr, char phy_reg, unsigned int phy_val) +{ + char i; + u16 Temp; + + al_mdio_gpio_preamble(); + /*OP Code 01*/ + gpio_direction_output(GPIO_MDIO_PIN, 0); + al_mdio_gpio_mdc_pulse(); + gpio_set_value(GPIO_MDIO_PIN, 1); + al_mdio_gpio_mdc_pulse(); + /*5 bits PHY addr*/ + for(i = 0; i < 5; i++) + { + if(phy_addr & 0x10) + { + gpio_set_value(GPIO_MDIO_PIN, 1); + } + else + { + gpio_set_value(GPIO_MDIO_PIN, 0); + } + al_mdio_gpio_mdc_pulse(); + phy_addr <<= 1; + } + /*5 bits PHY reg*/ + for(i = 0; i < 5; i++) + { + if(phy_reg & 0x10) + { + gpio_set_value(GPIO_MDIO_PIN, 1); + } + else + { + gpio_set_value(GPIO_MDIO_PIN, 0); + } + al_mdio_gpio_mdc_pulse(); + phy_reg <<= 1; + } + /*Turnaround 10*/ + gpio_set_value(GPIO_MDIO_PIN, 1); + al_mdio_gpio_mdc_pulse(); + gpio_set_value(GPIO_MDIO_PIN, 0); + al_mdio_gpio_mdc_pulse(); + /*Write 16 bits Data*/ + Temp = 0x8000; + for ( i = 0; i < 16; i++) + { + if(phy_val & Temp) + { + gpio_set_value(GPIO_MDIO_PIN, 1); + } + else + { + gpio_set_value(GPIO_MDIO_PIN, 0); + } + al_mdio_gpio_mdc_pulse(); + Temp >>= 1; + } + al_mdio_gpio_idle(); + + return 0; +} + +static int al_mdio_gpio_read(struct mii_bus *bus, int mii_id, int reg) +{ + return al_mdio_gpio_phy_reg_read(mii_id, reg); +} + +static int al_mdio_gpio_write(struct mii_bus *bus, int mii_id, int reg, u16 val) +{ + return al_mdio_gpio_phy_reg_write(mii_id, reg, val); +} + +static struct mii_bus *al_mdio_gpio_init(struct device *dev) +{ + struct mii_bus *bus; + u16 phy_val; + int ret; + + /* MDC */ + ret = gpio_request(GPIO_MDC_PIN, "gpio_as_mdc"); + if (ret) + return NULL; + + gpio_direction_output(GPIO_MDC_PIN, 1); + + /* MDIO */ + ret = gpio_request(GPIO_MDIO_PIN, "gpio_as_mdio"); + if (ret) + goto fail_free_mdc_pin; + + gpio_direction_output(GPIO_MDIO_PIN, 0); + + al_mdio_gpio_phy_reg_write(ISIS_CHIP_ID, ISIS_CHIP_REG, 0x0); + phy_val = al_mdio_gpio_phy_reg_read(0x10, 0x0); + if (phy_val != MARVELL_CHIP_ID) + goto fail_free_mdio_pin; + + bus = mdiobus_alloc(); + if (bus == NULL) + goto fail_free_mdio_pin; + + bus->parent = dev; + bus->name = "mdio-al-gpio"; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-0", bus->name); + bus->read = &al_mdio_gpio_read; + bus->write = &al_mdio_gpio_write; + + dev_set_drvdata(dev, bus); + + return bus; + +fail_free_mdio_pin: + gpio_free(GPIO_MDIO_PIN); + +fail_free_mdc_pin: + gpio_free(GPIO_MDC_PIN); + + return NULL; +} + +static void al_mdio_gpio_deinit(struct device *dev) +{ + struct mii_bus *bus = dev_get_drvdata(dev); + + mdiobus_free(bus); + gpio_free(GPIO_MDC_PIN); + gpio_free(GPIO_MDIO_PIN); +} + +static void al_mdio_gpio_destroy(struct device *dev) +{ + struct mii_bus *bus = dev_get_drvdata(dev); + + mdiobus_unregister(bus); + al_mdio_gpio_deinit(dev); +} + +static int al_mdio_gpio_probe(struct platform_device *pdev) +{ + struct mii_bus *bus; + int ret; + + bus = al_mdio_gpio_init(&pdev->dev); + if (!bus) + return -ENODEV; + + ret = of_mdiobus_register(bus, pdev->dev.of_node); + if (ret) + al_mdio_gpio_deinit(&pdev->dev); + + return ret; +} + +static int al_mdio_gpio_remove(struct platform_device *pdev) +{ + al_mdio_gpio_destroy(&pdev->dev); + + return 0; +} + +static const struct of_device_id al_mdio_gpio_of_match[] = { + { .compatible = 
"annapurna-labs,mdio-al-gpio", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, al_mdio_gpio_of_match); + +static struct platform_driver al_mdio_gpio_driver = { + .probe = al_mdio_gpio_probe, + .remove = al_mdio_gpio_remove, + .driver = { + .name = "mdio-al-gpio", + .of_match_table = al_mdio_gpio_of_match, + }, +}; + +module_platform_driver(al_mdio_gpio_driver); diff --git a/target/linux/alpine/files/drivers/thermal/al/Makefile b/target/linux/alpine/files/drivers/thermal/al/Makefile new file mode 100644 index 00000000000000..73b90dc3c836b3 --- /dev/null +++ b/target/linux/alpine/files/drivers/thermal/al/Makefile @@ -0,0 +1,3 @@ +ccflags-y := -I$(srctree)/arch/arm/mach-alpine/include + +obj-$(CONFIG_AL_THERMAL) += al_thermal.o al_hal_thermal_sensor.o diff --git a/target/linux/alpine/files/drivers/thermal/al/al_hal_thermal_sensor.c b/target/linux/alpine/files/drivers/thermal/al/al_hal_thermal_sensor.c new file mode 100644 index 00000000000000..155dc123d4d095 --- /dev/null +++ b/target/linux/alpine/files/drivers/thermal/al/al_hal_thermal_sensor.c @@ -0,0 +1,232 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*******************************************************************************/ + +#include "al_hal_thermal_sensor.h" +#include "al_hal_thermal_sensor_regs.h" + +#define OFFSET 1154 /* 115.4 x 10 - for better accuracy */ +#define MULTIPLIER 3523 /* 352.3 x 10 - for better accuracy */ + +#define READOUT_TO_CELCIUS(readout) \ + ((((((int)(readout)) * MULTIPLIER) / 4096) - OFFSET) / 10) + +#define CELCIUS_TO_READOUT(celcius) \ + ((((10 * (celcius)) + OFFSET) * 4096) / MULTIPLIER) + +#define AL_THERMAL_SENSOR_MIN_THRESHOLD_VAL 0 +#define AL_THERMAL_SENSOR_MAX_THRESHOLD_VAL 0xfff + +/****************************************************************************** + ******************************************************************************/ +int al_thermal_sensor_handle_init( + struct al_thermal_sensor_handle *thermal_sensor_handle, + void __iomem *thermal_sensor_reg_base) +{ + al_assert(thermal_sensor_handle); + al_assert(thermal_sensor_reg_base); + + thermal_sensor_handle->regs = (struct al_thermal_sensor_regs __iomem *) + thermal_sensor_reg_base; + + return 0; +} + +/****************************************************************************** + ******************************************************************************/ +void al_thermal_sensor_trim_set( + struct al_thermal_sensor_handle *thermal_sensor_handle, + unsigned int trim) +{ + struct al_thermal_sensor_regs __iomem *regs; + + al_assert(thermal_sensor_handle); + + regs = thermal_sensor_handle->regs; + + al_reg_write32_masked( + ®s->unit.static_config, + THERMAL_SENSE_UNIT_STATIC_CONFIG_TRIM_MASK, + trim << THERMAL_SENSE_UNIT_STATIC_CONFIG_TRIM_SHIFT); +} + +/****************************************************************************** + ******************************************************************************/ +void al_thermal_sensor_threshold_config( + struct al_thermal_sensor_handle *thermal_sensor_handle, + int low_temp_threshold, + int high_temp_threshold) +{ + struct al_thermal_sensor_regs __iomem *regs; + unsigned int low_threshold = + (low_temp_threshold == + AL_THERMAL_SENSOR_LOW_THRESHOLD_DISABLE) ? + AL_THERMAL_SENSOR_MIN_THRESHOLD_VAL : + (unsigned int)CELCIUS_TO_READOUT(low_temp_threshold); + unsigned int high_threshold = + (high_temp_threshold == + AL_THERMAL_SENSOR_HIGH_THRESHOLD_DISABLE) ? + AL_THERMAL_SENSOR_MAX_THRESHOLD_VAL : + (unsigned int)CELCIUS_TO_READOUT(high_temp_threshold); + + al_assert(thermal_sensor_handle); + + regs = thermal_sensor_handle->regs; + + al_reg_write32( + ®s->unit.threshold_config, + (low_threshold << + THERMAL_SENSE_UNIT_THRESHOLD_CONFIG_LOW_TEMP_THRESHOLD_SHIFT) | + (high_threshold << + THERMAL_SENSE_UNIT_THRESHOLD_CONFIG_HIGH_TEMP_THRESHOLD_SHIFT)); +} + +/****************************************************************************** + ******************************************************************************/ +void al_thermal_sensor_enable_set( + struct al_thermal_sensor_handle *thermal_sensor_handle, + int enable) +{ + struct al_thermal_sensor_regs __iomem *regs; + + al_assert(thermal_sensor_handle); + + regs = thermal_sensor_handle->regs; + + if (!enable) + al_reg_write32(®s->unit.dynamic_config, 0); + + al_reg_write32_masked( + ®s->unit.static_config, + THERMAL_SENSE_UNIT_STATIC_CONFIG_POWER_DOWN | + THERMAL_SENSE_UNIT_STATIC_CONFIG_ENABLE, + enable ? 
+ THERMAL_SENSE_UNIT_STATIC_CONFIG_ENABLE : + THERMAL_SENSE_UNIT_STATIC_CONFIG_POWER_DOWN); +} + +/****************************************************************************** + ******************************************************************************/ +int al_thermal_sensor_is_ready( + struct al_thermal_sensor_handle *thermal_sensor_handle) +{ + struct al_thermal_sensor_regs __iomem *regs; + uint32_t status_reg_val; + int is_valid; + + al_assert(thermal_sensor_handle); + + regs = thermal_sensor_handle->regs; + + status_reg_val = al_reg_read32( + ®s->unit.status); + + is_valid = ((status_reg_val & THERMAL_SENSE_UNIT_STATUS_T_PWR_OK) && + (status_reg_val & THERMAL_SENSE_UNIT_STATUS_T_INIT_DONE)); + + return is_valid; +} + +/****************************************************************************** + ******************************************************************************/ +void al_thermal_sensor_trigger_once( + struct al_thermal_sensor_handle *thermal_sensor_handle) +{ + struct al_thermal_sensor_regs __iomem *regs; + + al_assert(thermal_sensor_handle); + + regs = thermal_sensor_handle->regs; + + al_reg_write32(®s->unit.dynamic_config, + THERMAL_SENSE_UNIT_DYNAMIC_CONFIG_RUN_ONCE); +} + +/****************************************************************************** + ******************************************************************************/ +void al_thermal_sensor_trigger_continuous( + struct al_thermal_sensor_handle *thermal_sensor_handle) +{ + struct al_thermal_sensor_regs __iomem *regs; + + al_assert(thermal_sensor_handle); + + regs = thermal_sensor_handle->regs; + + al_reg_write32(®s->unit.dynamic_config, + THERMAL_SENSE_UNIT_DYNAMIC_CONFIG_KEEP_RUNNING); +} + +/****************************************************************************** + ******************************************************************************/ +int al_thermal_sensor_readout_is_valid( + struct al_thermal_sensor_handle *thermal_sensor_handle) +{ + struct al_thermal_sensor_regs __iomem *regs; + uint32_t status_reg_val; + int is_valid; + + al_assert(thermal_sensor_handle); + + regs = thermal_sensor_handle->regs; + + status_reg_val = al_reg_read32( + ®s->unit.status); + + is_valid = ((status_reg_val & THERMAL_SENSE_UNIT_STATUS_T_PWR_OK) && + (status_reg_val & THERMAL_SENSE_UNIT_STATUS_T_INIT_DONE) && + (status_reg_val & THERMAL_SENSE_UNIT_STATUS_T_VALID)); + + if (is_valid) + thermal_sensor_handle->readout_raw = (status_reg_val & + THERMAL_SENSE_UNIT_STATUS_T_RESULT_MASK) >> + THERMAL_SENSE_UNIT_STATUS_T_RESULT_SHIFT; + + return is_valid; +} + +/****************************************************************************** + ******************************************************************************/ +int al_thermal_sensor_readout_get( + struct al_thermal_sensor_handle *thermal_sensor_handle) +{ + int readout; + + al_assert(thermal_sensor_handle); + + readout = READOUT_TO_CELCIUS(thermal_sensor_handle->readout_raw); + + return readout; +} + diff --git a/target/linux/alpine/files/drivers/thermal/al/al_hal_thermal_sensor.h b/target/linux/alpine/files/drivers/thermal/al/al_hal_thermal_sensor.h new file mode 100644 index 00000000000000..ec102201b0fb0a --- /dev/null +++ b/target/linux/alpine/files/drivers/thermal/al/al_hal_thermal_sensor.h @@ -0,0 +1,212 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. 
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_thermal_sensor Thermal Sensor
+ * @ingroup group_sys_services
+ * @{
+ * Common Operation Example:
+ * @code
+ * int main(void)
+ * {
+ *	struct al_thermal_sensor_handle handle;
+ *	int err;
+ *
+ *	err = al_thermal_sensor_handle_init(&handle, (void __iomem*)0xfd860a00);
+ *	if (err)
+ *		return err;
+ *
+ *	al_thermal_sensor_enable_set(&handle, 1);
+ *
+ *	while (!al_thermal_sensor_is_ready(&handle))
+ *		udelay(1);
+ *
+ *	al_thermal_sensor_trigger_once(&handle);
+ *
+ *	while (!al_thermal_sensor_readout_is_valid(&handle))
+ *		udelay(1);
+ *
+ *	printf("temperature: %d degrees\n",
+ *		al_thermal_sensor_readout_get(&handle));
+ *
+ *	al_thermal_sensor_enable_set(&handle, 0);
+ * }
+ * @endcode
+ * @file al_hal_thermal_sensor.h
+ * @brief HAL Driver Header for the Annapurna Labs thermal sensor unit
+ *
+ */
+
+#ifndef _AL_HAL_THERMAL_SENSE_H_
+#define _AL_HAL_THERMAL_SENSE_H_
+
+#include
+
+/******************************* Constants ***********************************/
+/** Value to be used for disabling the low temperature threshold */
+#define AL_THERMAL_SENSOR_LOW_THRESHOLD_DISABLE -1000
+/** Value to be used for disabling the high temperature threshold */
+#define AL_THERMAL_SENSOR_HIGH_THRESHOLD_DISABLE 1000
+
+/*********************** Data Structures and Types ***************************/
+
+/**
+ * al_thermal_sensor_handle: data structure used by the HAL to handle thermal
+ * sensor functionality. this structure is allocated and set to zeros by the
+ * upper layer, then it is initialized by al_thermal_sensor_handle_init(),
+ * which should be called before any other function of this API. later, this
+ * handle is passed to the API functions.
+ */ +struct al_thermal_sensor_handle { + struct al_thermal_sensor_regs __iomem *regs; + uint32_t readout_raw; +}; + +/************************************* API ***********************************/ + +/** + * @brief Initialize a thermal sensor handle structure + * + * @param thermal_sensor_handle + * an allocated, non-initialized instance. + * @param thermal_sensor_reg_base + * the virtual base address of the thermal sensor internal + * registers + * + * @return 0 if no error found. + */ +int al_thermal_sensor_handle_init( + struct al_thermal_sensor_handle *thermal_sensor_handle, + void __iomem *thermal_sensor_reg_base); + +/** + * @brief Set SW digital trimming + * + * @param thermal_sensor_handle + * thermal_sensor handle + * @param trim + * the required auxiliary trimming (process dependent, usually + * obtained from eFuse, default value is 8) + */ +void al_thermal_sensor_trim_set( + struct al_thermal_sensor_handle *thermal_sensor_handle, + unsigned int trim); + +/** + * @brief Set low and high temperature thresholds for interrupt generation + * + * @param thermal_sensor_handle + * thermal_sensor handle + * @param low_temp_threshold + * low temperature threshold in celsius degrees + * a value of AL_THERMAL_SENSOR_LOW_THRESHOLD_DISABLE disables + * the threshold + * @param high_temp_threshold + * high temperature threshold in celsius degrees + * a value of AL_THERMAL_SENSOR_HIGH_THRESHOLD_DISABLE disables + * the threshold + */ +void al_thermal_sensor_threshold_config( + struct al_thermal_sensor_handle *thermal_sensor_handle, + int low_temp_threshold, + int high_temp_threshold); + +/** + * @brief Power up/down and enable/disable the thermal sensor unit + * + * @param thermal_sensor_handle + * thermal_sensor handle + * @param enable + * Whether to power-up and enable the thermal sensor or to disable + * it and power it down + */ +void al_thermal_sensor_enable_set( + struct al_thermal_sensor_handle *thermal_sensor_handle, + int enable); + +/** + * @brief Determine if the thermal sensor is ready + * + * @param thermal_sensor_handle + * thermal_sensor handle + * + * @return non zero if the thermal sensor is ready + */ +int al_thermal_sensor_is_ready( + struct al_thermal_sensor_handle *thermal_sensor_handle); + +/** + * @brief Trigger single sample + * + * @param thermal_sensor_handle + * thermal_sensor handle + */ +void al_thermal_sensor_trigger_once( + struct al_thermal_sensor_handle *thermal_sensor_handle); + +/** + * @brief Trigger continuous sampling + * + * @param thermal_sensor_handle + * thermal_sensor handle + */ +void al_thermal_sensor_trigger_continuous( + struct al_thermal_sensor_handle *thermal_sensor_handle); + +/** + * @brief Determine if the thermal sensor readout is valid + * + * @param thermal_sensor_handle + * thermal_sensor handle + * + * @return non zero if the thermal sensor readout is valid + */ +int al_thermal_sensor_readout_is_valid( + struct al_thermal_sensor_handle *thermal_sensor_handle); + +/** + * @brief Get the thermal sensor current readout - assuming valid + * + * @param thermal_sensor_handle + * thermal_sensor handle + * + * @return current thermal sensor readout [degrees celcius] + */ +int al_thermal_sensor_readout_get( + struct al_thermal_sensor_handle *thermal_sensor_handle); + +#endif +/** @} end of groupthermal_sensor group */ + diff --git a/target/linux/alpine/files/drivers/thermal/al/al_hal_thermal_sensor_regs.h b/target/linux/alpine/files/drivers/thermal/al/al_hal_thermal_sensor_regs.h new file mode 100644 index 00000000000000..11315f07c2775e --- 
/dev/null +++ b/target/linux/alpine/files/drivers/thermal/al/al_hal_thermal_sensor_regs.h @@ -0,0 +1,136 @@ +/******************************************************************************* +Copyright (C) 2013 Annapurna Labs Ltd. + +This file may be licensed under the terms of the Annapurna Labs Commercial +License Agreement. + +Alternatively, this file can be distributed under the terms of the GNU General +Public License V2 as published by the Free Software Foundation and can be +found at http://www.gnu.org/licenses/gpl-2.0.html + +Alternatively, redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + + +#ifndef __AL_THERMAL_SENSE_REGS_H__ +#define __AL_THERMAL_SENSE_REGS_H__ + +#ifdef __cplusplus +extern "C" { +#endif +/* +* Unit Registers +*/ + + + +struct al_thermal_sensor_unit { + uint32_t static_config; /* Thermal Sensor Static Configu ... */ + uint32_t threshold_config; /* Thermal Sensor reporting thre ... */ + uint32_t dynamic_config; /* Thermal Sensor flow triggers */ + uint32_t status; /* Thermal Sensor Status and Res ... */ +}; + +struct al_thermal_sensor_regs { + struct al_thermal_sensor_unit unit; +}; + + +/* +* Registers Fields +*/ + + +/**** static_config register ****/ +/* SW digital trimming offset */ +#define THERMAL_SENSE_UNIT_STATIC_CONFIG_TRIM_MASK 0x0000000F +#define THERMAL_SENSE_UNIT_STATIC_CONFIG_TRIM_SHIFT 0 +/* Power down of the temp sensor */ +#define THERMAL_SENSE_UNIT_STATIC_CONFIG_POWER_DOWN (1 << 4) +/* Clock enable. Set onlyonce power-down is cleared. */ +#define THERMAL_SENSE_UNIT_STATIC_CONFIG_ENABLE (1 << 5) +/* Analog vtrim enabling */ +#define THERMAL_SENSE_UNIT_STATIC_CONFIG_CAL (1 << 6) +/* Reserved */ +#define THERMAL_SENSE_UNIT_STATIC_CONFIG_RSRVD_7 (1 << 7) +/* Combined digital TRIM in case fuses are used to override the ... */ +#define THERMAL_SENSE_UNIT_STATIC_CONFIG_CTRIM_MASK 0x00000F00 +#define THERMAL_SENSE_UNIT_STATIC_CONFIG_CTRIM_SHIFT 8 +/* Clock divide ratio for 1uSec local clock needed by the sensor ... 
*/ +#define THERMAL_SENSE_UNIT_STATIC_CONFIG_CLK_RATIO_MASK 0x000FF000 +#define THERMAL_SENSE_UNIT_STATIC_CONFIG_CLK_RATIO_SHIFT 12 +/* PowerUp Time Period before Reset Release */ +#define THERMAL_SENSE_UNIT_STATIC_CONFIG_POWER_UP_COUNT_MASK 0x0FF00000 +#define THERMAL_SENSE_UNIT_STATIC_CONFIG_POWER_UP_COUNT_SHIFT 20 +/* Init Time Period */ +#define THERMAL_SENSE_UNIT_STATIC_CONFIG_INIT_COUNT_MASK 0xF0000000 +#define THERMAL_SENSE_UNIT_STATIC_CONFIG_INIT_COUNT_SHIFT 28 + +/**** threshold_config register ****/ +/* Threshold for Low-Temp indication and Interrupt assertion */ +#define THERMAL_SENSE_UNIT_THRESHOLD_CONFIG_LOW_TEMP_THRESHOLD_MASK 0x00000FFF +#define THERMAL_SENSE_UNIT_THRESHOLD_CONFIG_LOW_TEMP_THRESHOLD_SHIFT 0 +/* Reserved */ +#define THERMAL_SENSE_UNIT_THRESHOLD_CONFIG_RSRVD_15_12_MASK 0x0000F000 +#define THERMAL_SENSE_UNIT_THRESHOLD_CONFIG_RSRVD_15_12_SHIFT 12 +/* Threshold for High-Temp indication and Interrupt assertion */ +#define THERMAL_SENSE_UNIT_THRESHOLD_CONFIG_HIGH_TEMP_THRESHOLD_MASK 0x0FFF0000 +#define THERMAL_SENSE_UNIT_THRESHOLD_CONFIG_HIGH_TEMP_THRESHOLD_SHIFT 16 +/* Reserved */ +#define THERMAL_SENSE_UNIT_THRESHOLD_CONFIG_RSRVD_31_28_MASK 0xF0000000 +#define THERMAL_SENSE_UNIT_THRESHOLD_CONFIG_RSRVD_31_28_SHIFT 28 + +/**** dynamic_config register ****/ +/* Run Once command */ +#define THERMAL_SENSE_UNIT_DYNAMIC_CONFIG_RUN_ONCE (1 << 0) +/* Run Continuous command. Kept active till cleared by SW. */ +#define THERMAL_SENSE_UNIT_DYNAMIC_CONFIG_KEEP_RUNNING (1 << 1) +/* Reserved */ +#define THERMAL_SENSE_UNIT_DYNAMIC_CONFIG_RSRVD_31_2_MASK 0xFFFFFFFC +#define THERMAL_SENSE_UNIT_DYNAMIC_CONFIG_RSRVD_31_2_SHIFT 2 + +/**** status register ****/ +/* Thermal Sensor read result */ +#define THERMAL_SENSE_UNIT_STATUS_T_RESULT_MASK 0x00000FFF +#define THERMAL_SENSE_UNIT_STATUS_T_RESULT_SHIFT 0 +/* Temp is below Min. (Clear On Read) */ +#define THERMAL_SENSE_UNIT_STATUS_T_LOW (1 << 13) +/* Temp is above Max. (Clear On Read) */ +#define THERMAL_SENSE_UNIT_STATUS_T_HIGH (1 << 14) +/* Temp Result Valid . (Clear On Read) */ +#define THERMAL_SENSE_UNIT_STATUS_T_VALID (1 << 15) +/* Thermal Sensor Init Done. (4uSec after T_PWR_OK assertion) */ +#define THERMAL_SENSE_UNIT_STATUS_T_INIT_DONE (1 << 29) +/* Thermal Sensor powered up (about 50uSec after power up assertion ... */ +#define THERMAL_SENSE_UNIT_STATUS_T_PWR_OK (1 << 30) +/* Thermal Sensor is busy (in the middle of conversion sequence) */ +#define THERMAL_SENSE_UNIT_STATUS_T_BUSY (1 << 31) + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/target/linux/alpine/files/drivers/thermal/al/al_thermal.c b/target/linux/alpine/files/drivers/thermal/al/al_thermal.c new file mode 100644 index 00000000000000..d3e41c76726a73 --- /dev/null +++ b/target/linux/alpine/files/drivers/thermal/al/al_thermal.c @@ -0,0 +1,215 @@ +/* + * Annapurna Labs thermal driver. + * + * Copyright (C) 2013 Annapurna Labs + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include + +#include "../thermal_hwmon.h" + +#include "al_hal_thermal_sensor.h" + +#define MCELSIUS(temp) ((temp) * 1000) + +#define TIMEOUT_MS 1000 + +struct al_thermal_dev { + struct al_thermal_sensor_handle handle; +}; + +static inline int thermal_enable(struct al_thermal_sensor_handle *handle) +{ + int timeout; + + al_thermal_sensor_enable_set(handle, 1); + + for (timeout = 0; timeout < TIMEOUT_MS; timeout++) { + if (al_thermal_sensor_is_ready(handle)) + break; + udelay(1000); + } + if (timeout == TIMEOUT_MS) { + pr_err("%s: al_thermal_sensor_is_ready timed out!\n", __func__); + return -ETIME; + } + + al_thermal_sensor_trigger_continuous(handle); + + return 0; +} + +static inline int thermal_get_temp(struct thermal_zone_device *thermal, + int *temp) +{ + struct al_thermal_dev *al_dev = thermal->devdata; + int timeout; + + for (timeout = 0; timeout < TIMEOUT_MS; timeout++) { + if (al_thermal_sensor_readout_is_valid(&al_dev->handle)) + break; + udelay(1000); + } + if (timeout == TIMEOUT_MS) { + pr_err("%s: al_thermal_sensor_readout_is_valid timed out!\n", + __func__); + return -ETIME; + } + + *temp = MCELSIUS(al_thermal_sensor_readout_get(&al_dev->handle)); + + return 0; +} + +static struct thermal_zone_device_ops ops = { + .get_temp = thermal_get_temp, +}; + +#ifdef CONFIG_PM +static int al_thermal_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct thermal_zone_device *al_thermal = platform_get_drvdata(pdev); + struct al_thermal_dev *al_dev = al_thermal->devdata; + + /* Disable Annapurna Labs Thermal Sensor */ + al_thermal_sensor_enable_set(&al_dev->handle, 0); + + pr_info("%s: Suspended.\n", __func__); + + return 0; +} + +static int al_thermal_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct thermal_zone_device *al_thermal = platform_get_drvdata(pdev); + struct al_thermal_dev *al_dev = al_thermal->devdata; + int err = 0; + + /* Enable Annapurna Labs Thermal Sensor */ + err = thermal_enable(&al_dev->handle); + if (err) { + pr_err("%s: thermal_enable failed!\n", __func__); + return err; + } + + pr_info("%s: Resumed.\n", __func__); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(al_thermal_pm_ops, al_thermal_suspend, + al_thermal_resume); + +static int al_thermal_probe(struct platform_device *pdev) +{ + struct thermal_zone_device *al_thermal = NULL; + struct al_thermal_dev *al_dev; + struct device_node *np = pdev->dev.of_node; + struct resource *al_res = + platform_get_resource(pdev, IORESOURCE_MEM, 0); + void __iomem *thermal_base; + int err; + + if (!np) { + pr_err("%s: Failed: DT data not available\n", __func__); + return -EINVAL; + } + + if (!al_res) { + pr_err("%s: memory resource missing\n", __func__); + return -ENODEV; + } + + al_dev = devm_kzalloc(&pdev->dev, sizeof(*al_dev), GFP_KERNEL); + if (!al_dev) { + pr_err("%s: kzalloc fail\n", __func__); + return -ENOMEM; + } + + thermal_base = + devm_ioremap(&pdev->dev, al_res->start, resource_size(al_res)); + if (!thermal_base) { + pr_err("%s: ioremap failed\n", __func__); + return -ENOMEM; + } + + err = al_thermal_sensor_handle_init(&al_dev->handle, thermal_base); + if (err) { + pr_err("%s: al_thermal_sensor_init failed!\n", __func__); + return err; + } + + err = thermal_enable(&al_dev->handle); + if (err) { + pr_err("%s: thermal_enable failed!\n", __func__); + return err; + } + + al_thermal = devm_thermal_of_zone_register(&pdev->dev, 0, al_dev, &ops); + if (IS_ERR(al_thermal)) { + 
pr_err("%s: thermal zone device is NULL\n", __func__); + err = PTR_ERR(al_thermal); + return err; + } + + platform_set_drvdata(pdev, al_thermal); + + devm_thermal_add_hwmon_sysfs(&pdev->dev, al_thermal); + + pr_info("%s: Thermal Sensor Loaded at: 0x%p.\n", __func__, + thermal_base); + + return 0; +} + +static int al_thermal_exit(struct platform_device *pdev) +{ + struct thermal_zone_device *al_thermal = platform_get_drvdata(pdev); + struct al_thermal_dev *al_dev = al_thermal->devdata; + + thermal_zone_device_unregister(al_thermal); + platform_set_drvdata(pdev, NULL); + + al_thermal_sensor_enable_set(&al_dev->handle, 0); + + return 0; +} + +static const struct of_device_id al_thermal_id_table[] = { + { .compatible = "annapurna-labs,al-thermal" }, + {} +}; +MODULE_DEVICE_TABLE(of, al_thermal_id_table); + +static struct platform_driver al_thermal_driver = { + .probe = al_thermal_probe, + .remove = al_thermal_exit, + .driver = { + .name = "al_thermal", + .owner = THIS_MODULE, + .pm = &al_thermal_pm_ops, + .of_match_table = of_match_ptr(al_thermal_id_table), + }, +}; + +module_platform_driver(al_thermal_driver); + +MODULE_DESCRIPTION("Annapurna Labs thermal driver"); +MODULE_LICENSE("GPL"); diff --git a/target/linux/alpine/generic/target.mk b/target/linux/alpine/generic/target.mk new file mode 100644 index 00000000000000..f5cb1fb19b943f --- /dev/null +++ b/target/linux/alpine/generic/target.mk @@ -0,0 +1 @@ +BOARDNAME:=Generic diff --git a/target/linux/alpine/image/Makefile b/target/linux/alpine/image/Makefile new file mode 100644 index 00000000000000..856484805471b0 --- /dev/null +++ b/target/linux/alpine/image/Makefile @@ -0,0 +1,61 @@ +# Copyright (c) 2014 The Linux Foundation. All rights reserved. +# +include $(TOPDIR)/rules.mk +include $(INCLUDE_DIR)/image.mk + +DEVICE_VARS += NETGEAR_BOARD_ID NETGEAR_HW_ID + +define Device/Default + PROFILES := Default + KERNEL_DEPENDS = $$(wildcard $(DTS_DIR)/$$(DEVICE_DTS).dts) + KERNEL_LOADADDR = 0x8000 + IMAGES := sysupgrade.bin + IMAGE/sysupgrade.bin = sysupgrade-tar | append-metadata + IMAGE/sysupgrade.bin/squashfs := +endef + +define Device/DniImage + KERNEL_SUFFIX := -uImage + KERNEL = kernel-bin | append-dtb | uImage none + KERNEL_NAME := zImage + KERNEL_SIZE := 32768k + DEVICE_VENDOR := NETGEAR + DEVICE_PACKAGES := + NETGEAR_BOARD_ID := + NETGEAR_HW_ID := 29765235+0+512+1024+4x4+4x4 + SOC := alpine + UBINIZE_OPTS := -E 5 + BLOCKSIZE := 128k + PAGESIZE := 2048 + NAND_SIZE := 512m + IMAGES += factory.img dtb + IMAGE/factory.img := append-string -e '\xff\xff\xff\xff' | append-kernel | \ + pad-offset $$$$(BLOCKSIZE) 64 | append-uImage-fakehdr filesystem | \ + pad-to $$$$(KERNEL_SIZE) | append-ubi | netgear-dni + IMAGE/sysupgrade.bin := append-string -e '\xff\xff\xff\xff' | append-kernel | \ + pad-offset $$$$(BLOCKSIZE) 64 | append-uImage-fakehdr filesystem | \ + sysupgrade-tar kernel=$$$$@ | append-metadata + IMAGE/dtb := install-dtb +endef + +define Device/netgear_r9000 + $(call Device/DniImage) + DEVICE_MODEL := Nighthawk X10 R9000 + DEVICE_DTS := alpine-r9000 + NETGEAR_BOARD_ID := R9000 + BOARD_NAME := r9000 + SUPPORTED_DEVICES += r9000 +endef +TARGET_DEVICES += netgear_r9000 + +define Device/netgear_xr700 + $(call Device/DniImage) + DEVICE_MODEL := Nighthawk XR700 + DEVICE_DTS := alpine-xr700 + NETGEAR_BOARD_ID := XR700 + BOARD_NAME := xr700 + SUPPORTED_DEVICES += xr700 r9000 +endef +TARGET_DEVICES += netgear_xr700 + +$(eval $(call BuildImage)) diff --git a/target/linux/alpine/patches-6.6/010-alpine-r9000-dtb.patch 
new file mode 100644
index 00000000000000..3450b37a461e28
--- /dev/null
+++ b/target/linux/alpine/patches-6.6/010-alpine-r9000-dtb.patch
@@ -0,0 +1,19 @@
+Index: linux-6.6.22/arch/arm/boot/dts/Makefile
+===================================================================
+--- linux-6.6.22.orig/arch/arm/boot/dts/Makefile
++++ linux-6.6.22/arch/arm/boot/dts/Makefile
+@@ -3,7 +3,6 @@ subdir-y += actions
+ subdir-y += airoha
+ subdir-y += allwinner
+ subdir-y += alphascale
+-subdir-y += amazon
+ subdir-y += amlogic
+ subdir-y += arm
+ subdir-y += aspeed
+@@ -39,3 +38,6 @@ subdir-y += unisoc
+ subdir-y += vt8500
+ subdir-y += xen
+ subdir-y += xilinx
++
++dtb-$(CONFIG_ARCH_ALPINE) += \
++	alpine-r9000.dtb
diff --git a/target/linux/alpine/patches-6.6/020-arm-add-hwcc-support.patch b/target/linux/alpine/patches-6.6/020-arm-add-hwcc-support.patch
new file mode 100644
index 00000000000000..c3c42d83c51bf1
--- /dev/null
+++ b/target/linux/alpine/patches-6.6/020-arm-add-hwcc-support.patch
@@ -0,0 +1,29 @@
+--- a/arch/arm/include/asm/device.h
++++ b/arch/arm/include/asm/device.h
+@@ -10,6 +10,9 @@ struct dev_archdata {
+ 	struct dma_iommu_mapping *mapping;
+ #endif
+ 	unsigned int dma_ops_setup:1;
++#ifdef CONFIG_ARM_HWCC_FLAG
++	int hwcc; /* 1 - HW cache coherency, 0 - SW cache coherency */
++#endif
+ };
+ 
+ struct omap_device;
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -187,6 +187,14 @@ config ARM_DMA_IOMMU_ALIGNMENT
+ 
+ endif
+ 
++config ARM_HWCC_FLAG
++	bool
++	default n
++	help
++	  Select if you want the ARM specific device structure to include a flag
++	  for determining whether HW cache coherency is enabled.
++	  If unsure, say 'N'.
++
+ config SYS_SUPPORTS_APM_EMULATION
+ 	bool
+ 
diff --git a/target/linux/alpine/patches-6.6/030-irq-gic-alpine-drop-intr-from-main-gic.patch b/target/linux/alpine/patches-6.6/030-irq-gic-alpine-drop-intr-from-main-gic.patch
new file mode 100644
index 00000000000000..6163f55dc9f210
--- /dev/null
+++ b/target/linux/alpine/patches-6.6/030-irq-gic-alpine-drop-intr-from-main-gic.patch
@@ -0,0 +1,36 @@
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -706,6 +706,20 @@ void gic_cpu_restore(struct gic_chip_dat
+ 	gic_cpu_if_up(gic);
+ }
+ 
++static void gic_cpu_mask(unsigned int gic_nr)
++{
++	void __iomem *cpu_base;
++
++	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
++
++	if (!cpu_base)
++		return;
++
++	/* do not raise any interrupt from cpu interface.
++ * do not bypass to the legacy_irq and legacy_fiq legs. */ ++ writel_relaxed(0 | (3<<5), cpu_base + GIC_CPU_CTRL); ++} ++ + static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) + { + int i; +@@ -729,6 +743,12 @@ static int gic_notifier(struct notifier_ + } + } + ++#ifdef CONFIG_ARCH_ALPINE ++ /* do not accept interrupts from the main GIC */ ++ if (cmd == CPU_PM_ENTER) ++ gic_cpu_mask(0); ++#endif ++ + return NOTIFY_OK; + } + diff --git a/target/linux/alpine/patches-6.6/031-irq-gic-alpine-max-nr-two.patch b/target/linux/alpine/patches-6.6/031-irq-gic-alpine-max-nr-two.patch new file mode 100644 index 00000000000000..8699e175fc5ba3 --- /dev/null +++ b/target/linux/alpine/patches-6.6/031-irq-gic-alpine-max-nr-two.patch @@ -0,0 +1,10 @@ +--- a/drivers/irqchip/Kconfig ++++ b/drivers/irqchip/Kconfig +@@ -19,6 +19,7 @@ config ARM_GIC_MAX_NR + int + depends on ARM_GIC + default 2 if ARCH_REALVIEW ++ default 2 if ARCH_ALPINE + default 1 + + config ARM_GIC_V2M diff --git a/target/linux/alpine/patches-6.6/040-gpio-pl061-add-of-baseidx-support.patch b/target/linux/alpine/patches-6.6/040-gpio-pl061-add-of-baseidx-support.patch new file mode 100644 index 00000000000000..0dd0d425cc50aa --- /dev/null +++ b/target/linux/alpine/patches-6.6/040-gpio-pl061-add-of-baseidx-support.patch @@ -0,0 +1,21 @@ +--- a/drivers/gpio/gpio-pl061.c ++++ b/drivers/gpio/gpio-pl061.c +@@ -310,6 +310,7 @@ static int pl061_probe(struct amba_devic + struct pl061 *pl061; + struct gpio_irq_chip *girq; + int ret, irq; ++ const void *ptr; + + pl061 = devm_kzalloc(dev, sizeof(*pl061), GFP_KERNEL); + if (pl061 == NULL) +@@ -333,6 +334,10 @@ static int pl061_probe(struct amba_devic + pl061->gc.parent = dev; + pl061->gc.owner = THIS_MODULE; + ++ ptr = of_get_property(dev->of_node, "baseidx", NULL); ++ if (ptr) ++ pl061->gc.base = be32_to_cpup(ptr); ++ + /* + * irq_chip support + */ diff --git a/target/linux/alpine/patches-6.6/050-add-alpine-arm-arch-system-type.patch b/target/linux/alpine/patches-6.6/050-add-alpine-arm-arch-system-type.patch new file mode 100644 index 00000000000000..6ba6405f2d680e --- /dev/null +++ b/target/linux/alpine/patches-6.6/050-add-alpine-arm-arch-system-type.patch @@ -0,0 +1,41 @@ +--- a/arch/arm/mach-alpine/Kconfig ++++ b/arch/arm/mach-alpine/Kconfig +@@ -2,13 +2,26 @@ + config ARCH_ALPINE + bool "Annapurna Labs Alpine platform" + depends on ARCH_MULTI_V7 +- select ALPINE_MSI ++ select CPU_V7 ++ select HAVE_SMP + select ARM_AMBA ++ select SPARSE_IRQ + select ARM_GIC + select GENERIC_IRQ_CHIP ++ select GENERIC_IRQ_MULTI_HANDLER + select HAVE_ARM_ARCH_TIMER ++ select ARM_TIMER_SP804 ++ select CLKDEV_LOOKUP ++ select GENERIC_CLOCKEVENTS ++ select HAVE_CLK ++ select HAVE_ARCH_TIMERS ++ select COMMON_CLK ++ select HAVE_SCHED_CLOCK ++ select ARCH_SCHED_CLOCK + select MFD_SYSCON ++ select ARM_HWCC_FLAG ++ select ARM_HAS_SG_CHAIN ++ select ARCH_SUPPORTS_BIG_ENDIAN + select FORCE_PCI +- select PCI_HOST_GENERIC + help + This enables support for the Annapurna Labs Alpine V1 boards. +--- a/arch/arm/Makefile ++++ b/arch/arm/Makefile +@@ -229,6 +229,7 @@ machine-$(CONFIG_PLAT_SPEAR) += spear + machdirs-$(CONFIG_ARCH_FOOTBRIDGE) += arch/arm/mach-footbridge + machdirs-$(CONFIG_ARCH_RPC) += arch/arm/mach-rpc + machdirs-$(CONFIG_ARCH_SA1100) += arch/arm/mach-sa1100 ++machdirs-$(CONFIG_ARCH_ALPINE) += arch/arm/mach-alpine + KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%/include,$(machdirs-y)) + + # The byte offset of the kernel image in RAM from the start of RAM. 
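A note on ARM_HWCC_FLAG, which the mach-alpine Kconfig above selects and which 020-arm-add-hwcc-support.patch introduces: the patch only adds a per-device hwcc field to struct dev_archdata; its consumers live in the Annapurna driver code carried elsewhere in this series. The sketch below is illustrative only, under the assumption that a driver wants to skip software cache maintenance on hardware-coherent devices; the helper names are hypothetical, and only dev->archdata.hwcc and its 1/0 meaning come from the patch.

/*
 * Illustrative sketch only: consuming the hwcc flag added by
 * 020-arm-add-hwcc-support.patch. Helper names are hypothetical.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static bool al_dev_is_hw_coherent(struct device *dev)
{
#ifdef CONFIG_ARM_HWCC_FLAG
	/* 1 - HW cache coherency, 0 - SW cache coherency */
	return dev->archdata.hwcc == 1;
#else
	return false;
#endif
}

/* Skip explicit cache maintenance when the device is HW-coherent. */
static void al_sync_rx_buf_for_cpu(struct device *dev, dma_addr_t addr,
				   size_t len)
{
	if (!al_dev_is_hw_coherent(dev))
		dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
}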
diff --git a/target/linux/alpine/patches-6.6/051-add-alpine-low-level-debugging-port.patch b/target/linux/alpine/patches-6.6/051-add-alpine-low-level-debugging-port.patch new file mode 100644 index 00000000000000..cc6cd0e3c5c721 --- /dev/null +++ b/target/linux/alpine/patches-6.6/051-add-alpine-low-level-debugging-port.patch @@ -0,0 +1,41 @@ +--- a/arch/arm/Kconfig.debug ++++ b/arch/arm/Kconfig.debug +@@ -1470,6 +1470,13 @@ choice + options; the platform specific options are deprecated + and will be soon removed. + ++ config DEBUG_ALPINE_UART ++ bool "Kernel low-level debugging messages via ALPINE UART" ++ depends on ARCH_ALPINE ++ help ++ Say Y here if you want kernel low-level debugging support ++ on Alpine-based platforms. ++ + endchoice + + config DEBUG_AT91_UART +--- a/arch/arm/include/debug/8250.S ++++ b/arch/arm/include/debug/8250.S +@@ -6,10 +6,12 @@ + */ + #include <linux/serial_reg.h> + ++#ifdef CONFIG_DEBUG_UART_PHYS + .macro addruart, rp, rv, tmp + ldr \rp, =CONFIG_DEBUG_UART_PHYS + ldr \rv, =CONFIG_DEBUG_UART_VIRT + .endm ++#endif + + #ifdef CONFIG_DEBUG_UART_8250_WORD + .macro store, rd, rx:vararg +@@ -32,7 +34,9 @@ + .endm + #endif + ++#ifdef CONFIG_DEBUG_UART_8250_SHIFT + #define UART_SHIFT CONFIG_DEBUG_UART_8250_SHIFT ++#endif + + .macro senduart,rd,rx + store \rd, [\rx, #UART_TX << UART_SHIFT] diff --git a/target/linux/alpine/patches-6.6/060-add-alpine-cpuidle-support.patch b/target/linux/alpine/patches-6.6/060-add-alpine-cpuidle-support.patch new file mode 100644 index 00000000000000..30d8a13b941255 --- /dev/null +++ b/target/linux/alpine/patches-6.6/060-add-alpine-cpuidle-support.patch @@ -0,0 +1,24 @@ +--- a/drivers/cpuidle/Kconfig.arm ++++ b/drivers/cpuidle/Kconfig.arm +@@ -122,3 +122,11 @@ config ARM_QCOM_SPM_CPUIDLE + The Subsystem Power Manager (SPM) controls low power modes for the + CPU and L2 cores. It interface with various system drivers to put + the cores in low power modes. ++ ++config CPU_IDLE_ALPINE ++ bool "CPU Idle Driver for Annapurna Labs Alpine SOC" ++ depends on ARCH_ALPINE ++ default y ++ help ++ Select this to enable cpuidle on Annapurna Labs Alpine SOC. ++ If unsure, say Y. +--- a/drivers/cpuidle/Makefile ++++ b/drivers/cpuidle/Makefile +@@ -26,6 +26,7 @@ obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuid + obj-$(CONFIG_ARM_PSCI_CPUIDLE_DOMAIN) += cpuidle-psci-domain.o + obj-$(CONFIG_ARM_TEGRA_CPUIDLE) += cpuidle-tegra.o + obj-$(CONFIG_ARM_QCOM_SPM_CPUIDLE) += cpuidle-qcom-spm.o ++obj-$(CONFIG_CPU_IDLE_ALPINE) += cpuidle-alpine.o + + ############################################################################### + # MIPS drivers diff --git a/target/linux/alpine/patches-6.6/070-add-pci-to-alpine-arch.patch b/target/linux/alpine/patches-6.6/070-add-pci-to-alpine-arch.patch new file mode 100644 index 00000000000000..5d086fcc619268 --- /dev/null +++ b/target/linux/alpine/patches-6.6/070-add-pci-to-alpine-arch.patch @@ -0,0 +1,14 @@ +--- a/arch/arm/mach-alpine/Kconfig ++++ b/arch/arm/mach-alpine/Kconfig +@@ -23,5 +23,11 @@ config ARCH_ALPINE + select ARM_HAS_SG_CHAIN + select ARCH_SUPPORTS_BIG_ENDIAN + select FORCE_PCI ++ select PCI_HOST_GENERIC ++ select PCI_DOMAINS ++ select PCI_DOMAINS_GENERIC ++ select ARCH_SUPPORTS_MSI ++ select ALPINE_MSI ++ select PCI_MSI_IRQ_DOMAIN + help + This enables support for the Annapurna Labs Alpine V1 boards. 
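The next patch, 071-add-of-pci-range-iter-support.patch, reinstates the legacy of_pci_range_iter walker, presumably for the Alpine PCIe host code carried elsewhere in this series. A minimal usage sketch follows, assuming a hypothetical al_pcie_parse_ranges() probe helper and a device_node carrying a PCI "ranges" property; the iterator struct, for_each_of_pci_range_compat() and range_iter_fill_resource() are exactly the helpers the patch adds to <linux/of_address.h>.

/*
 * Illustrative sketch only: walking a host bridge's DT "ranges" with the
 * iterator restored by 071-add-of-pci-range-iter-support.patch. The
 * function name is hypothetical.
 */
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/printk.h>

static int al_pcie_parse_ranges(struct device_node *np)
{
	struct of_pci_range_iter iter;
	struct resource res;

	for_each_of_pci_range_compat(&iter, np) {
		/* contiguous entries were already merged by the iterator */
		range_iter_fill_resource(iter, np, &res);
		pr_info("PCIe range %pR (flags 0x%x)\n", &res, iter.flags);
	}
	return 0;
}

Note the design choice the patch bakes into of_pci_process_ranges(): adjacent "ranges" entries with matching flags and contiguous PCI and CPU addresses are coalesced into one iteration, so the caller sees one resource per contiguous window.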
diff --git a/target/linux/alpine/patches-6.6/071-add-of-pci-range-iter-support.patch b/target/linux/alpine/patches-6.6/071-add-of-pci-range-iter-support.patch new file mode 100644 index 00000000000000..eae3f465623d06 --- /dev/null +++ b/target/linux/alpine/patches-6.6/071-add-of-pci-range-iter-support.patch @@ -0,0 +1,117 @@ +--- a/drivers/of/address.c ++++ b/drivers/of/address.c +@@ -672,6 +672,59 @@ const __be32 *__of_get_address(struct de + } + EXPORT_SYMBOL(__of_get_address); + ++struct of_pci_range_iter *of_pci_process_ranges(struct of_pci_range_iter *iter, ++ struct device_node *node) ++{ ++ const int na = 3, ns = 2; ++ int rlen; ++ ++ if (!iter->range) { ++ iter->pna = of_n_addr_cells(node); ++ iter->np = iter->pna + na + ns; ++ ++ iter->range = of_get_property(node, "ranges", &rlen); ++ if (iter->range == NULL) ++ return NULL; ++ ++ iter->end = iter->range + rlen / sizeof(__be32); ++ } ++ ++ if (iter->range + iter->np > iter->end) ++ return NULL; ++ ++ iter->pci_space = be32_to_cpup(iter->range); ++ iter->flags = of_bus_pci_get_flags(iter->range); ++ iter->pci_addr = of_read_number(iter->range + 1, ns); ++ iter->cpu_addr = of_translate_address(node, iter->range + na); ++ iter->size = of_read_number(iter->range + iter->pna + na, ns); ++ ++ iter->range += iter->np; ++ ++ /* Now consume following elements while they are contiguous */ ++ while (iter->range + iter->np <= iter->end) { ++ u32 flags, pci_space; ++ u64 pci_addr, cpu_addr, size; ++ ++ pci_space = be32_to_cpup(iter->range); ++ flags = of_bus_pci_get_flags(iter->range); ++ pci_addr = of_read_number(iter->range + 1, ns); ++ cpu_addr = of_translate_address(node, iter->range + na); ++ size = of_read_number(iter->range + iter->pna + na, ns); ++ ++ if (flags != iter->flags) ++ break; ++ if (pci_addr != iter->pci_addr + iter->size || ++ cpu_addr != iter->cpu_addr + iter->size) ++ break; ++ ++ iter->size += size; ++ iter->range += iter->np; ++ } ++ ++ return iter; ++} ++EXPORT_SYMBOL_GPL(of_pci_process_ranges); ++ + static int parser_init(struct of_pci_range_parser *parser, + struct device_node *node, const char *name) + { +--- a/include/linux/of_address.h ++++ b/include/linux/of_address.h +@@ -8,6 +8,30 @@ + + struct of_bus; + ++struct of_pci_range_iter { ++ const __be32 *range, *end; ++ int np, pna; ++ ++ u32 pci_space; ++ u64 pci_addr; ++ u64 cpu_addr; ++ u64 size; ++ u32 flags; ++}; ++ ++#define for_each_of_pci_range_compat(iter, np) \ ++ for (memset((iter), 0, sizeof(struct of_pci_range_iter)); \ ++ of_pci_process_ranges(iter, np);) ++ ++#define range_iter_fill_resource(iter, np, res) \ ++ do { \ ++ (res)->flags = (iter).flags; \ ++ (res)->start = (iter).cpu_addr; \ ++ (res)->end = (iter).cpu_addr + (iter).size - 1; \ ++ (res)->parent = (res)->child = (res)->sibling = NULL; \ ++ (res)->name = (np)->full_name; \ ++ } while (0) ++ + struct of_pci_range_parser { + struct device_node *node; + struct of_bus *bus; +@@ -67,6 +91,8 @@ extern int of_pci_range_to_resource(stru + struct device_node *np, + struct resource *res); + extern bool of_dma_is_coherent(struct device_node *np); ++struct of_pci_range_iter *of_pci_process_ranges(struct of_pci_range_iter *iter, ++ struct device_node *node); + #else /* CONFIG_OF_ADDRESS */ + static inline void __iomem *of_io_request_and_map(struct device_node *device, + int index, const char *name) +@@ -122,6 +148,12 @@ static inline bool of_dma_is_coherent(st + { + return false; + } ++ ++static inline struct of_pci_range_iter *of_pci_process_ranges(struct of_pci_range_iter *iter, ++ struct device_node 
*node) ++{ ++ return NULL; ++} + #endif /* CONFIG_OF_ADDRESS */ + + #ifdef CONFIG_OF diff --git a/target/linux/alpine/patches-6.6/072-of-irq-alpine-msi-override.patch b/target/linux/alpine/patches-6.6/072-of-irq-alpine-msi-override.patch new file mode 100644 index 00000000000000..ef3d9a259cdacc --- /dev/null +++ b/target/linux/alpine/patches-6.6/072-of-irq-alpine-msi-override.patch @@ -0,0 +1,31 @@ +--- a/drivers/of/irq.c ++++ b/drivers/of/irq.c +@@ -60,6 +60,11 @@ struct device_node *of_irq_find_parent(s + return NULL; + + do { ++#ifdef CONFIG_ARCH_ALPINE ++ if (!strncmp(child->name, "pcie-external", 13) || !strncmp(child->name, "pcie-internal", 13)) ++ p = of_get_parent(child); ++ else ++#endif + if (of_property_read_u32(child, "interrupt-parent", &parent)) { + p = of_get_parent(child); + } else { +@@ -701,8 +706,15 @@ struct irq_domain *of_msi_get_domain(str + { + struct device_node *msi_np; + struct irq_domain *d; +- ++#ifdef CONFIG_ARCH_ALPINE ++ struct device_node *msi_external; ++ msi_external = of_find_compatible_node(NULL, NULL, "annapurna-labs,al-msix"); + /* Check for a single msi-parent property */ ++ if (!strncmp(np->name, "pcie-external", 13) || !strncmp(np->name, "pcie-internal", 13)) { ++ printk(KERN_INFO "msi override %s\n", np->name); ++ msi_np = msi_external; ++ } else ++#endif + msi_np = of_parse_phandle(np, "msi-parent", 0); + if (msi_np && !of_property_read_bool(msi_np, "#msi-cells")) { + d = irq_find_matching_host(msi_np, token); diff --git a/target/linux/alpine/patches-6.6/073-export-pcie_bus_configure_set.patch b/target/linux/alpine/patches-6.6/073-export-pcie_bus_configure_set.patch new file mode 100644 index 00000000000000..ad86b9ce9e87be --- /dev/null +++ b/target/linux/alpine/patches-6.6/073-export-pcie_bus_configure_set.patch @@ -0,0 +1,11 @@ +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -2794,7 +2794,7 @@ static void pcie_write_mrrs(struct pci_d + pci_err(dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n"); + } + +-static int pcie_bus_configure_set(struct pci_dev *dev, void *data) ++int pcie_bus_configure_set(struct pci_dev *dev, void *data) + { + int mps, orig_mps; + diff --git a/target/linux/alpine/patches-6.6/074-pci-quirks-enable-alpine-msi.patch b/target/linux/alpine/patches-6.6/074-pci-quirks-enable-alpine-msi.patch new file mode 100644 index 00000000000000..1076018b9f2c78 --- /dev/null +++ b/target/linux/alpine/patches-6.6/074-pci-quirks-enable-alpine-msi.patch @@ -0,0 +1,27 @@ +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -3121,24 +3121,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AT + quirk_msi_intx_disable_qca_bug); + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091, + quirk_msi_intx_disable_qca_bug); +- +-/* +- * Amazon's Annapurna Labs 1c36:0031 Root Ports don't support MSI-X, so it +- * should be disabled on platforms where the device (mistakenly) advertises it. +- * +- * Notice that this quirk also disables MSI (which may work, but hasn't been +- * tested), since currently there is no standard way to disable only MSI-X. +- * +- * The 0031 device id is reused for other non Root Port device types, +- * therefore the quirk is registered for the PCI_CLASS_BRIDGE_PCI class. 
+- */ +-static void quirk_al_msi_disable(struct pci_dev *dev) +-{ +- dev->no_msi = 1; +- pci_warn(dev, "Disabling MSI/MSI-X\n"); +-} +-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, +- PCI_CLASS_BRIDGE_PCI, 8, quirk_al_msi_disable); + #endif /* CONFIG_PCI_MSI */ + + /* diff --git a/target/linux/alpine/patches-6.6/075-disable-default-irq-alpine-msi.patch b/target/linux/alpine/patches-6.6/075-disable-default-irq-alpine-msi.patch new file mode 100644 index 00000000000000..ba44ae265a043f --- /dev/null +++ b/target/linux/alpine/patches-6.6/075-disable-default-irq-alpine-msi.patch @@ -0,0 +1,10 @@ +--- a/drivers/irqchip/Makefile ++++ b/drivers/irqchip/Makefile +@@ -2,7 +2,6 @@ + obj-$(CONFIG_IRQCHIP) += irqchip.o + + obj-$(CONFIG_AL_FIC) += irq-al-fic.o +-obj-$(CONFIG_ALPINE_MSI) += irq-alpine-msi.o + obj-$(CONFIG_ATH79) += irq-ath79-cpu.o + obj-$(CONFIG_ATH79) += irq-ath79-misc.o + obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o diff --git a/target/linux/alpine/patches-6.6/080-add-alpine-edac-support.patch b/target/linux/alpine/patches-6.6/080-add-alpine-edac-support.patch new file mode 100644 index 00000000000000..772dfcb8ffdeaf --- /dev/null +++ b/target/linux/alpine/patches-6.6/080-add-alpine-edac-support.patch @@ -0,0 +1,9 @@ +--- a/drivers/edac/Kconfig ++++ b/drivers/edac/Kconfig +@@ -541,4 +541,6 @@ config EDAC_DMC520 + Support for error detection and correction on the + SoCs with ARM DMC-520 DRAM controller. + ++source "drivers/edac/al/Kconfig" ++ + endif # EDAC diff --git a/target/linux/alpine/patches-6.6/090-alpine-dma-raid-accel-engine.patch b/target/linux/alpine/patches-6.6/090-alpine-dma-raid-accel-engine.patch new file mode 100644 index 00000000000000..0d9ca6158a2259 --- /dev/null +++ b/target/linux/alpine/patches-6.6/090-alpine-dma-raid-accel-engine.patch @@ -0,0 +1,18 @@ +--- a/drivers/dma/Kconfig ++++ b/drivers/dma/Kconfig +@@ -780,6 +780,8 @@ source "drivers/dma/fsl-dpaa2-qdma/Kconf + + source "drivers/dma/lgm/Kconfig" + ++source "drivers/dma/al/Kconfig" ++ + # clients + comment "DMA Clients" + depends on DMA_ENGINE +--- a/drivers/dma/Makefile ++++ b/drivers/dma/Makefile +@@ -88,3 +88,4 @@ obj-y += mediatek/ + obj-y += qcom/ + obj-y += ti/ + obj-y += xilinx/ ++obj-y += al/ diff --git a/target/linux/alpine/patches-6.6/091-add-alpine-dma-pci-ids.patch b/target/linux/alpine/patches-6.6/091-add-alpine-dma-pci-ids.patch new file mode 100644 index 00000000000000..d81fe86011893a --- /dev/null +++ b/target/linux/alpine/patches-6.6/091-add-alpine-dma-pci-ids.patch @@ -0,0 +1,11 @@ +--- a/include/linux/pci_ids.h ++++ b/include/linux/pci_ids.h +@@ -2570,6 +2570,8 @@ + #define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c + + #define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36 ++#define PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_RAID_DMA 0x0021 ++#define PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_RAID_DMA_VF 0x8021 + + #define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 + #define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001 diff --git a/target/linux/alpine/patches-6.6/092-add-alpine-dma-pci-quirk-msi-intx-disable-bug.patch b/target/linux/alpine/patches-6.6/092-add-alpine-dma-pci-quirk-msi-intx-disable-bug.patch new file mode 100644 index 00000000000000..d59f2362ab1f72 --- /dev/null +++ b/target/linux/alpine/patches-6.6/092-add-alpine-dma-pci-quirk-msi-intx-disable-bug.patch @@ -0,0 +1,13 @@ +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -3121,6 +3121,10 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AT + quirk_msi_intx_disable_qca_bug); + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091, 
+ quirk_msi_intx_disable_qca_bug); ++ ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, ++ PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_RAID_DMA, ++ quirk_msi_intx_disable_bug); + #endif /* CONFIG_PCI_MSI */ + + /* diff --git a/target/linux/alpine/patches-6.6/100-mtd-nand-alpine-support.patch b/target/linux/alpine/patches-6.6/100-mtd-nand-alpine-support.patch new file mode 100644 index 00000000000000..e2dee9d9b2467a --- /dev/null +++ b/target/linux/alpine/patches-6.6/100-mtd-nand-alpine-support.patch @@ -0,0 +1,20 @@ +--- a/drivers/mtd/nand/raw/Kconfig ++++ b/drivers/mtd/nand/raw/Kconfig +@@ -557,4 +557,6 @@ config MTD_NAND_DISKONCHIP_BBTWRITE + load time (assuming you build diskonchip as a module) with the module + parameter "inftl_bbt_write=1". + ++source "drivers/mtd/nand/raw/al/Kconfig" ++ + endif # MTD_RAW_NAND +--- a/drivers/mtd/nand/raw/Makefile ++++ b/drivers/mtd/nand/raw/Makefile +@@ -60,6 +60,8 @@ obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rock + obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o + obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o + ++obj-y += al/ ++ + nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o + nand-objs += nand_onfi.o + nand-objs += nand_jedec.o diff --git a/target/linux/alpine/patches-6.6/110-add-alpine-mdio-gpio-support.patch b/target/linux/alpine/patches-6.6/110-add-alpine-mdio-gpio-support.patch new file mode 100644 index 00000000000000..aafca0328e881b --- /dev/null +++ b/target/linux/alpine/patches-6.6/110-add-alpine-mdio-gpio-support.patch @@ -0,0 +1,25 @@ +--- a/drivers/net/mdio/Kconfig ++++ b/drivers/net/mdio/Kconfig +@@ -272,6 +272,12 @@ config MDIO_BUS_MUX_MMIOREG + + Currently, only 8/16/32 bits registers are supported. + ++config MDIO_AL_GPIO ++ tristate "Alpine MDIO GPIO bus controller" ++ depends on ARCH_ALPINE ++ help ++ This module provides a driver for the MDIO GPIO bus found in the ++ Alpine SoC. 
+ + endif + endif +--- a/drivers/net/mdio/Makefile ++++ b/drivers/net/mdio/Makefile +@@ -22,6 +22,7 @@ obj-$(CONFIG_MDIO_OCTEON) += mdio-octeo + obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o + obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o + obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o ++obj-$(CONFIG_MDIO_AL_GPIO) += mdio-al-gpio.o + + obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o + obj-$(CONFIG_MDIO_BUS_MUX_BCM6368) += mdio-mux-bcm6368.o diff --git a/target/linux/alpine/patches-6.6/120-add-alpine-ethernet-support.patch b/target/linux/alpine/patches-6.6/120-add-alpine-ethernet-support.patch new file mode 100644 index 00000000000000..93e73175aee192 --- /dev/null +++ b/target/linux/alpine/patches-6.6/120-add-alpine-ethernet-support.patch @@ -0,0 +1,16 @@ +--- a/drivers/net/ethernet/Kconfig ++++ b/drivers/net/ethernet/Kconfig +@@ -192,5 +192,6 @@ source "drivers/net/ethernet/via/Kconfig + source "drivers/net/ethernet/wiznet/Kconfig" + source "drivers/net/ethernet/xilinx/Kconfig" + source "drivers/net/ethernet/xircom/Kconfig" ++source "drivers/net/ethernet/al/Kconfig" + + endif # ETHERNET +--- a/drivers/net/ethernet/Makefile ++++ b/drivers/net/ethernet/Makefile +@@ -104,3 +104,4 @@ obj-$(CONFIG_NET_VENDOR_XILINX) += xilin + obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ + obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ + obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/ ++obj-$(CONFIG_NET_AL_ETH) += al/ diff --git a/target/linux/alpine/patches-6.6/121-add-alpine-eth-pci-ids.patch b/target/linux/alpine/patches-6.6/121-add-alpine-eth-pci-ids.patch new file mode 100644 index 00000000000000..fc32338b0d57a8 --- /dev/null +++ b/target/linux/alpine/patches-6.6/121-add-alpine-eth-pci-ids.patch @@ -0,0 +1,13 @@ +--- a/include/linux/pci_ids.h ++++ b/include/linux/pci_ids.h +@@ -2570,6 +2570,10 @@ + #define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c + + #define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36 ++#define PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_ETH 0x0001 ++#define PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_ETH_ADVANCED 0x0002 ++#define PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_ETH_NIC 0x0003 ++#define PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_ETH_FPGA_NIC 0x0030 + #define PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_RAID_DMA 0x0021 + #define PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_RAID_DMA_VF 0x8021 + diff --git a/target/linux/alpine/patches-6.6/122-add-alpine-eth-pci-quirk-msi-intx-disable-bug.patch b/target/linux/alpine/patches-6.6/122-add-alpine-eth-pci-quirk-msi-intx-disable-bug.patch new file mode 100644 index 00000000000000..63e65602030602 --- /dev/null +++ b/target/linux/alpine/patches-6.6/122-add-alpine-eth-pci-quirk-msi-intx-disable-bug.patch @@ -0,0 +1,12 @@ +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -3125,6 +3125,9 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AT + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, + PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_RAID_DMA, + quirk_msi_intx_disable_bug); ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, ++ PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_ETH, ++ quirk_msi_intx_disable_bug); + #endif /* CONFIG_PCI_MSI */ + + /* diff --git a/target/linux/alpine/patches-6.6/130-ar8327-preserve-port5-status.patch b/target/linux/alpine/patches-6.6/130-ar8327-preserve-port5-status.patch new file mode 100644 index 00000000000000..60cc16441449d1 --- /dev/null +++ b/target/linux/alpine/patches-6.6/130-ar8327-preserve-port5-status.patch @@ -0,0 +1,56 @@ +--- a/drivers/net/phy/ar8327.c ++++ b/drivers/net/phy/ar8327.c +@@ -541,6 +541,7 @@ ar8327_hw_config_pdata(struct ar8xxx_pri + priv->get_port_link 
= pdata->get_port_link; + + data->port0_status = ar8327_get_port_init_status(&pdata->port0_cfg); ++ data->port5_status = ar8327_get_port_init_status(&pdata->port5_cfg); + data->port6_status = ar8327_get_port_init_status(&pdata->port6_cfg); + + t = ar8327_get_pad_cfg(pdata->pad0_cfg); +@@ -634,6 +635,9 @@ ar8327_hw_config_of(struct ar8xxx_priv * + case AR8327_REG_PORT_STATUS(0): + data->port0_status = val; + break; ++ case AR8327_REG_PORT_STATUS(5): ++ data->port5_status = val; ++ break; + case AR8327_REG_PORT_STATUS(6): + data->port6_status = val; + break; +@@ -723,12 +727,14 @@ ar8327_init_port(struct ar8xxx_priv *pri + + if (port == AR8216_PORT_CPU) + t = data->port0_status; ++ else if (port == 5) ++ t = data->port5_status; + else if (port == 6) + t = data->port6_status; + else + t = AR8216_PORT_STATUS_LINK_AUTO; + +- if (port != AR8216_PORT_CPU && port != 6) { ++ if (port != AR8216_PORT_CPU && port != 5 && port != 6) { + /*hw limitation:if configure mac when there is traffic, + port MAC may work abnormal. Need disable lan&wan mac at fisrt*/ + ar8xxx_write(priv, AR8327_REG_PORT_STATUS(port), 0); +--- a/drivers/net/phy/ar8327.h ++++ b/drivers/net/phy/ar8327.h +@@ -321,6 +321,7 @@ struct ar8327_led { + + struct ar8327_data { + u32 port0_status; ++ u32 port5_status; + u32 port6_status; + + struct ar8327_led **leds; +--- a/include/linux/ar8216_platform.h ++++ b/include/linux/ar8216_platform.h +@@ -120,6 +120,7 @@ struct ar8327_platform_data { + struct ar8327_pad_cfg *pad6_cfg; + struct ar8327_sgmii_cfg *sgmii_cfg; + struct ar8327_port_cfg port0_cfg; ++ struct ar8327_port_cfg port5_cfg; + struct ar8327_port_cfg port6_cfg; + struct ar8327_led_cfg *led_cfg; + diff --git a/target/linux/alpine/patches-6.6/140-add-alpine-thermal-support.patch b/target/linux/alpine/patches-6.6/140-add-alpine-thermal-support.patch new file mode 100644 index 00000000000000..f127b39f75f51a --- /dev/null +++ b/target/linux/alpine/patches-6.6/140-add-alpine-thermal-support.patch @@ -0,0 +1,26 @@ +Index: linux-6.6.22/drivers/thermal/Makefile +=================================================================== +--- linux-6.6.22.orig/drivers/thermal/Makefile ++++ linux-6.6.22/drivers/thermal/Makefile +@@ -64,3 +64,4 @@ obj-$(CONFIG_AMLOGIC_THERMAL) += aml + obj-$(CONFIG_SPRD_THERMAL) += sprd_thermal.o + obj-$(CONFIG_KHADAS_MCU_FAN_THERMAL) += khadas_mcu_fan.o + obj-$(CONFIG_LOONGSON2_THERMAL) += loongson2_thermal.o ++obj-$(CONFIG_AL_THERMAL) += al/ +Index: linux-6.6.22/drivers/thermal/Kconfig +=================================================================== +--- linux-6.6.22.orig/drivers/thermal/Kconfig ++++ linux-6.6.22/drivers/thermal/Kconfig +@@ -522,4 +522,12 @@ config LOONGSON2_THERMAL + is higher than the high temperature threshold or lower than the low + temperature threshold, the interrupt will occur. 
++config AL_THERMAL ++ bool "Annapurna Labs thermal sensor driver" ++ depends on ARCH_ALPINE ++ depends on OF ++ help ++ Enable this to plug the Annapurna Labs thermal sensor driver into the ++ Linux thermal framework. ++ + endif diff --git a/target/linux/alpine/patches-6.6/150-add-alpine-crypto-support.patch b/target/linux/alpine/patches-6.6/150-add-alpine-crypto-support.patch new file mode 100644 index 00000000000000..c5b9a1c13517dc --- /dev/null +++ b/target/linux/alpine/patches-6.6/150-add-alpine-crypto-support.patch @@ -0,0 +1,20 @@ +Index: linux-6.6.22/drivers/crypto/Kconfig +=================================================================== +--- linux-6.6.22.orig/drivers/crypto/Kconfig ++++ linux-6.6.22/drivers/crypto/Kconfig +@@ -797,4 +797,6 @@ config CRYPTO_DEV_SA2UL + source "drivers/crypto/aspeed/Kconfig" + source "drivers/crypto/starfive/Kconfig" + ++source "drivers/crypto/al/Kconfig" ++ + endif # CRYPTO_HW +Index: linux-6.6.22/drivers/crypto/Makefile +=================================================================== +--- linux-6.6.22.orig/drivers/crypto/Makefile ++++ linux-6.6.22/drivers/crypto/Makefile +@@ -51,3 +51,4 @@ obj-y += hisilicon/ + obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/ + obj-y += intel/ + obj-y += starfive/ ++obj-$(CONFIG_CRYPTO_DEV_AL_CRYPTO) += al/ diff --git a/target/linux/alpine/patches-6.6/151-add-alpine-crypto-pci-ids.patch b/target/linux/alpine/patches-6.6/151-add-alpine-crypto-pci-ids.patch new file mode 100644 index 00000000000000..366f87155733bb --- /dev/null +++ b/target/linux/alpine/patches-6.6/151-add-alpine-crypto-pci-ids.patch @@ -0,0 +1,11 @@ +--- a/include/linux/pci_ids.h ++++ b/include/linux/pci_ids.h +@@ -2576,6 +2576,8 @@ + #define PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_ETH_FPGA_NIC 0x0030 + #define PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_RAID_DMA 0x0021 + #define PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_RAID_DMA_VF 0x8021 ++#define PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_CRYPTO 0x0011 ++#define PCI_DEVICE_ID_AMAZON_ANNAPURNA_LABS_CRYPTO_VF 0x8011 + + #define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 + #define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001 diff --git a/target/linux/alpine/patches-6.6/152-fix-sg_split.patch b/target/linux/alpine/patches-6.6/152-fix-sg_split.patch new file mode 100644 index 00000000000000..977441c1dfd6bf --- /dev/null +++ b/target/linux/alpine/patches-6.6/152-fix-sg_split.patch @@ -0,0 +1,51 @@ +From 90349475a6fcbf874623b5de959326059fd31e16 Mon Sep 17 00:00:00 2001 +From: Alexander Egorenkov +Date: Sat, 17 Apr 2021 18:51:05 +0200 +Subject: [PATCH 1/3] lib: scatterlist: Fix SGL length in sg_split() if + !CONFIG_NEED_SG_DMA_LENGTH + +If CONFIG_NEED_SG_DMA_LENGTH is NOT enabled then sg_dma_len() is an alias +for the length field in a SGL. In that case sg_split() wrongly resets +the length of split SGLs to zero after it was set correctly before. + +Signed-off-by: Alexander Egorenkov +--- + lib/sg_split.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/lib/sg_split.c ++++ b/lib/sg_split.c +@@ -60,15 +60,16 @@ static int sg_calculate_split(struct sca + curr->length_last_sg = len; + size -= len; + } ++ ++ if (!nb_splits) ++ break; ++ + skip = 0; + + if (!size && --nb_splits > 0) { + curr++; + size = *(++sizes); + } +- +- if (!nb_splits) +- break; + } + + return (size || !splitters[0].in_sg0) ? 
-EINVAL : 0; +@@ -88,11 +89,10 @@ static void sg_split_phys(struct sg_spli + if (!j) { + out_sg->offset += split->skip_sg0; + out_sg->length -= split->skip_sg0; +- } else { +- out_sg->offset = 0; + } + sg_dma_address(out_sg) = 0; +- sg_dma_len(out_sg) = 0; ++ if (IS_ENABLED(CONFIG_NEED_SG_DMA_LENGTH)) ++ sg_dma_len(out_sg) = 0; + in_sg = sg_next(in_sg); + } + out_sg[-1].length = split->length_last_sg; diff --git a/target/linux/alpine/patches-6.6/160-add-alpine-fan-speed-controller-support.patch b/target/linux/alpine/patches-6.6/160-add-alpine-fan-speed-controller-support.patch new file mode 100644 index 00000000000000..a10e47c29e1c29 --- /dev/null +++ b/target/linux/alpine/patches-6.6/160-add-alpine-fan-speed-controller-support.patch @@ -0,0 +1,10 @@ +--- a/drivers/hwmon/g762.c ++++ b/drivers/hwmon/g762.c +@@ -45,6 +45,7 @@ + #define DRVNAME "g762" + + static const struct i2c_device_id g762_id[] = { ++ { "g761", 0 }, + { "g762", 0 }, + { "g763", 0 }, + { } diff --git a/target/linux/alpine/patches-6.6/170-alpine-xr700-dtb.patch b/target/linux/alpine/patches-6.6/170-alpine-xr700-dtb.patch new file mode 100644 index 00000000000000..a0dc949aa80976 --- /dev/null +++ b/target/linux/alpine/patches-6.6/170-alpine-xr700-dtb.patch @@ -0,0 +1,11 @@ +Index: linux-6.6.22/arch/arm/boot/dts/Makefile +=================================================================== +--- linux-6.6.22.orig/arch/arm/boot/dts/Makefile ++++ linux-6.6.22/arch/arm/boot/dts/Makefile +@@ -40,4 +40,5 @@ subdir-y += xen + subdir-y += xilinx + + dtb-$(CONFIG_ARCH_ALPINE) += \ +- alpine-r9000.dtb ++ alpine-r9000.dtb \ ++ alpine-xr700.dtb
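A closing note on 152-fix-sg_split.patch above: per its commit message, with CONFIG_NEED_SG_DMA_LENGTH unset, sg_dma_len() aliases the SGL length field, so the unfixed sg_split() handed back entries whose length had just been zeroed. A minimal, hypothetical caller of the kind a crypto offload engine relies on, assuming `in` was already mapped with dma_map_sg() to `in_nents` entries:

/*
 * Illustrative sketch only: a minimal sg_split() caller. Without the fix
 * in 152-fix-sg_split.patch, kernels built without
 * CONFIG_NEED_SG_DMA_LENGTH returned split entries whose length had been
 * reset to zero, because sg_dma_len() is an alias for sg->length there.
 */
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int demo_split(struct scatterlist *in, int in_nents)
{
	struct scatterlist *out[2];
	int out_nents[2];
	size_t sizes[2] = { 512, 512 };	/* arbitrary split points */
	int ret;

	ret = sg_split(in, in_nents, 0, 2, sizes, out, out_nents, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... use out[0]/out[1]; sg_split() allocates them, kfree() when done */
	kfree(out[0]);
	kfree(out[1]);
	return 0;
}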