diff --git a/README.md b/README.md index 47b2bfa090..8d4b6dc4a4 100644 --- a/README.md +++ b/README.md @@ -1,20 +1,25 @@ -# NVIDIA Jetson driver +# NVIDIA Jetson driver (Beta) -Driver for Allied Vision Alvium MIPI cameras for NVIDIA Jetson with JetPack 5.1 (L4T 35.2.1) +Driver (Beta) for Allied Vision Alvium MIPI cameras for NVIDIA Jetson with JetPack 5.1.1 (L4T 35.3.1) https://developer.nvidia.com/embedded/jetpack ![Alvium camera](https://cdn.alliedvision.com/fileadmin/content/images/cameras/Alvium/various/alvium-cameras-models.png) +THE SOFTWARE IS PRELIMINARY AND STILL IN TESTING AND VERIFICATION PHASE AND IS PROVIDED ON AN “AS IS” AND “AS AVAILABLE” BASIS AND IS BELIEVED TO CONTAIN DEFECTS. A PRIMARY PURPOSE OF THIS EARLY ACCESS IS TO OBTAIN FEEDBACK ON PERFORMANCE AND THE IDENTIFICATION OF DEFECT SOFTWARE, HARDWARE AND DOCUMENTATION. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + ## Overview The scripts in this project build and install the Allied Vision MIPI camera driver to the NVIDIA Jetson boards. -Compatible platforms with JetPack 5.1 (L4T 35.2.1) : +Compatible platforms with JetPack 5.1.1 (L4T 35.3.1) : - AGX Orin Developer Kit - AGX Xavier DevKit - Xavier NX DevKit - - Auvidea carrier JNX30-PD with Xavier NX + - Auvidea carrier JNX30-PD with Xavier NX + - Orin Nano Developer Kit (**NEW**) ***Before starting the installation, make sure to create a backup of your Jetson system.*** @@ -23,9 +28,9 @@ Compatible platforms with JetPack 5.1 (L4T 35.2.1) : ### Host PC The scripts for the driver installation require Git on the host PC. 
-### Install JetPack 5.1 +### Install JetPack 5.1.1 -Install JetPack 5.1 (L4T 35.2.1) as per NVIDIA's instructions +Install JetPack 5.1.1 (L4T 35.3.1) as per NVIDIA's instructions https://developer.nvidia.com/embedded/jetpack Recommendation: Use NVIDIA SDK Manager to install JetPack and useful tools such as CUDA. @@ -41,7 +46,7 @@ https://docs.nvidia.com/sdk-manager/ Install the precompiled kernel, which includes the driver and an installation menu. 1. Extract the tarball on a host PC. - The tarball contains helper scripts and another tarball with the precompiled binaries named AlliedVision_NVidia_L4T_35.2.1.0_.tar.gz. + The tarball contains helper scripts and another tarball with the precompiled binaries named AlliedVision_NVidia_L4T_35.3.1.0_.tar.gz. 2. Copy the tarball to the target board. 3. On the target board, extract the tarball and run the included install script. diff --git a/avt_build/jetson_build/board.py b/avt_build/jetson_build/board.py index 2a62577d59..1293ff16d1 100644 --- a/avt_build/jetson_build/board.py +++ b/avt_build/jetson_build/board.py @@ -3,9 +3,9 @@ from . import upstream from . 
import build -AVT_RELEASE = "5.1.0" +AVT_RELEASE = "5.1.1" KERNEL_RELEASE = "5.10.104-tegra" -L4T_VERSION = "35.2.1" +L4T_VERSION = "35.3.1" FileSet = namedtuple('FileSet', [ 'driver_package', @@ -14,9 +14,9 @@ def get_tx2_agx_nx_upstream_files(UpstreamFile): - driver_package = UpstreamFile("https://developer.download.nvidia.com/embedded/L4T/r35_Release_v2.1/release/Jetson_Linux_R35.2.1_aarch64.tbz2", "9959bcd3de79de231a8fb54119f9cdb57a753542d44d994e346664028142d40d") + driver_package = UpstreamFile("https://developer.download.nvidia.com/embedded/L4T/r35_Release_v3.1/release/Jetson_Linux_R35.3.1_aarch64.tbz2", "80a55504a2ce9cdc0a328edb6fd0690615c4ffde9c2b32f4e64e0a1a3432a2e2") #rootfs = UpstreamFile("https://developer.nvidia.com/embedded/l4t/r32_release_v7.1/t186/tegra_linux_sample-root-filesystem_r32.7.1_aarch64.tbz2", "17996e861dd092043509e0b7e9ae038e271e5b0b7b78f26a34db4f03df2b12b8") - public_sources = UpstreamFile("https://developer.download.nvidia.com/embedded/L4T/r35_Release_v2.1/sources/public_sources.tbz2", "ae9d2f903347013a915b128cf311899a24c6ba21e13607cdbde785e1f0557449") + public_sources = UpstreamFile("https://developer.download.nvidia.com/embedded/L4T/r35_Release_v3.1/sources/public_sources.tbz2", "cd914110043cdb2a19a298fefc52d9dacbbcd560f781955fe03a1e98b470f2ae") return FileSet( public_sources=public_sources, @@ -35,11 +35,9 @@ def get_tx2_agx_nx_upstream_files(UpstreamFile): bootloader_payload_files_xavier = [ ("bootloader/payloads_t19x/bl_only_payload", "opt/ota_package/t19x/bl_only_payload"), - ("bootloader/payloads_t19x/bl_update_payload", "opt/ota_package/t19x/bl_update_payload"), ("bootloader/payloads_t19x/TEGRA_BL.Cap", "opt/ota_package/t19x/TEGRA_BL.Cap"), ("bootloader/BOOTAA64.efi", "opt/ota_package/t19x/BOOTAA64.efi"), ("bootloader/payloads_t23x/bl_only_payload", "opt/ota_package/t23x/bl_only_payload"), - ("bootloader/payloads_t23x/bl_update_payload", "opt/ota_package/t23x/bl_update_payload"), ("bootloader/payloads_t23x/TEGRA_BL.Cap", 
"opt/ota_package/t23x/TEGRA_BL.Cap"), ("bootloader/BOOTAA64.efi", "opt/ota_package/t23x/BOOTAA64.efi") ] diff --git a/avt_build/jetson_build/deploy.py b/avt_build/jetson_build/deploy.py index eb6ed8ed52..1d23cbce7b 100644 --- a/avt_build/jetson_build/deploy.py +++ b/avt_build/jetson_build/deploy.py @@ -42,7 +42,7 @@ def build_kernel_deb(args, board): logging.info("Adding display drivers") os.makedirs(board.build_dir / "Linux_for_Tegra/kernel/origin/display", exist_ok=True) t.execute(["tar","xf",board.build_dir / "Linux_for_Tegra/kernel/kernel_display_supplements.tbz2","-C",board.build_dir / "Linux_for_Tegra/kernel/origin/display"]) - shutil.copytree(board.build_dir / f"Linux_for_Tegra/kernel/origin/display/lib/modules/{KERNEL_RELEASE}/extra",board.build_dir / f"Linux_for_Tegra/kernel/avt/kernel/debian/out/lib/modules/{kernel_release}/extra") + t.execute(['sudo', 'cp', '-a', board.build_dir / f"Linux_for_Tegra/kernel/origin/display/lib/modules/{KERNEL_RELEASE}/extra", board.build_dir / f"Linux_for_Tegra/kernel/avt/kernel/debian/out/lib/modules/{kernel_release}/extra"]) for ef in board.kernel_extra_files: diff --git a/avt_build/jetson_build/files/bootloader/config b/avt_build/jetson_build/files/bootloader/config index 0ce797f92a..950c70b109 100644 --- a/avt_build/jetson_build/files/bootloader/config +++ b/avt_build/jetson_build/files/bootloader/config @@ -77,8 +77,11 @@ nx_devkit = BoardDefinition('Jetson Xavier NX devkit', '0x19', '3668', [ Configuration('Auvidea JNX30', 'tegra194-p3668-0000-p3509-0000-auvidea-jnx30.dtb', '0000'), Configuration('Auvidea JNX30', 'tegra194-p3668-0001-p3509-0000-auvidea-jnx30.dtb', '0001'), + Configuration('Auvidea JNX30D', 'tegra194-p3668-0000-p3509-0000-auvidea-jnx30d.dtb', '0000'), + Configuration('Auvidea JNX30D', 'tegra194-p3668-0001-p3509-0000-auvidea-jnx30d.dtb', '0001'), #NX 16GB Configuration('Auvidea JNX30', 'tegra194-p3668-0001-p3509-0000-auvidea-jnx30.dtb', '0003'), + Configuration('Auvidea JNX30D', 
'tegra194-p3668-0001-p3509-0000-auvidea-jnx30d.dtb', '0003'), #Configuration('Auvidea JNX30+38491 FPD-Link (beta)', 'tegra194-p3668-all-p3509-0000-auvidea-jnx30-38491.dtb', '0000', beta=True, rootfs='/dev/mmcblk1p1'), #Configuration('Auvidea JNX30+38491 FPD-Link (beta)', 'tegra194-p3668-all-p3509-0000-auvidea-jnx30-38491.dtb', '0001', beta=True, rootfs='/dev/mmcblk1p1'), @@ -99,15 +102,20 @@ agx_orin_devkit = BoardDefinition('Jetson AGX Orin devkit', '0x23', '3701', [ Configuration('None', 'tegra234-p3701-0004-p3737-0000.dtb', '0004'), ]) -orin_nx_devkit = BoardDefinition('Jetson Orin NX devkit', '0x23', '3767', [ - Configuration('2 cameras', 'tegra234-p3767-0000-p3509-a02.dtb', '0000'), +orin_nx_nano_devkit = BoardDefinition('Jetson Orin Nano/NX devkit', '0x23', '3767', [ + Configuration('2 cameras', 'tegra234-p3767-0000-p3768-0000-a0.dtb', '0000'), + Configuration('2 cameras', 'tegra234-p3767-0001-p3768-0000-a0.dtb', '0001'), + + Configuration('2 cameras', 'tegra234-p3767-0003-p3768-0000-a0.dtb', '0003'), + Configuration('2 cameras', 'tegra234-p3767-0004-p3768-0000-a0.dtb', '0004'), + Configuration('2 cameras', 'tegra234-p3767-0003-p3768-0000-a0.dtb', '0005'), ]) boards = [ agx_devkit, nx_devkit, agx_orin_devkit, - orin_nx_devkit + orin_nx_nano_devkit ] _label_keywords = ['APPEND', 'FDT', 'FDTDIR', 'INITRD', 'LINUX', 'MENU'] diff --git a/avt_build/jetson_build/files/bootloader/control b/avt_build/jetson_build/files/bootloader/control index 0dcc77f9b9..8de03e8001 100644 --- a/avt_build/jetson_build/files/bootloader/control +++ b/avt_build/jetson_build/files/bootloader/control @@ -3,9 +3,9 @@ Maintainer: Allied Vision Technologies GmbH Package: avt-nvidia-l4t-bootloader Architecture: arm64 -Pre-Depends: nvidia-l4t-core (>> 35.2-0), nvidia-l4t-core (<< 35.3-0) -Depends: nvidia-l4t-tools (>> 35.2-0), nvidia-l4t-tools (<< 35.3-0), nvidia-l4t-init (>> 35.2-0), nvidia-l4t-init (<< 35.3-0) -Conflicts: nvidia-l4t-bootloader (<< 35.3-0) +Pre-Depends: nvidia-l4t-core (>> 
35.3-0), nvidia-l4t-core (<< 35.4-0) +Depends: nvidia-l4t-tools (>> 35.3-0), nvidia-l4t-tools (<< 35.4-0), nvidia-l4t-init (>> 35.3-0), nvidia-l4t-init (<< 35.4-0) +Conflicts: nvidia-l4t-bootloader (<< 35.4-0) Section: bootloader Priority: standard Homepage: http://developer.nvidia.com/jetson diff --git a/avt_build/jetson_build/files/kernel-deb/control b/avt_build/jetson_build/files/kernel-deb/control index 6b8cb5b6c3..8421110bf5 100644 --- a/avt_build/jetson_build/files/kernel-deb/control +++ b/avt_build/jetson_build/files/kernel-deb/control @@ -3,9 +3,9 @@ Maintainer: Allied Vision Technologies GmbH Package: avt-nvidia-l4t-kernel Architecture: arm64 -Pre-Depends: nvidia-l4t-core (>> 35.2-0), nvidia-l4t-core (<< 35.3-0) -Depends: nvidia-l4t-tools (>> 35.2-0), nvidia-l4t-tools (<< 35.3-0) -Conflicts: nvidia-l4t-kernel (<< 35.3-0) +Pre-Depends: nvidia-l4t-core (>> 35.3-0), nvidia-l4t-core (<< 35.4-0) +Depends: nvidia-l4t-tools (>> 35.3-0), nvidia-l4t-tools (<< 35.4-0) +Conflicts: nvidia-l4t-kernel (<< 35.4-0) Section: kernel Priority: standard Homepage: http://developer.nvidia.com/jetson diff --git a/avt_build/jetson_build/files/kernel-dtb-deb/control b/avt_build/jetson_build/files/kernel-dtb-deb/control index 73df70f50d..bfd4e1e8e1 100644 --- a/avt_build/jetson_build/files/kernel-dtb-deb/control +++ b/avt_build/jetson_build/files/kernel-dtb-deb/control @@ -3,9 +3,9 @@ Maintainer: Allied Vision Technologies GmbH Package: avt-nvidia-l4t-kernel-dtbs Architecture: arm64 -Pre-Depends: nvidia-l4t-core (>> 35.2-0), nvidia-l4t-core (<< 35.3-0) +Pre-Depends: nvidia-l4t-core (>> 35.3-0), nvidia-l4t-core (<< 35.4-0) Depends: device-tree-compiler, avt-nvidia-l4t-kernel (= ${KERNEL_RELEASE}-${L4T_VERSION}-${AVT_RELEASE}) -Conflicts: nvidia-l4t-kernel-dtbs (<< 35.3-0) +Conflicts: nvidia-l4t-kernel-dtbs (<< 35.4-0) Section: kernel Priority: standard Homepage: http://developer.nvidia.com/jetson diff --git a/avt_build/jetson_build/files/kernel-headers-deb/control 
b/avt_build/jetson_build/files/kernel-headers-deb/control index 69465fb2bb..6b664bdb52 100644 --- a/avt_build/jetson_build/files/kernel-headers-deb/control +++ b/avt_build/jetson_build/files/kernel-headers-deb/control @@ -3,9 +3,9 @@ Maintainer: Allied Vision Technologies GmbH Package: avt-nvidia-l4t-kernel-headers Architecture: arm64 -Pre-Depends: nvidia-l4t-core (>> 35.2-0), nvidia-l4t-core (<< 35.3-0) +Pre-Depends: nvidia-l4t-core (>> 35.3-0), nvidia-l4t-core (<< 35.4-0) Depends: libc6, avt-nvidia-l4t-kernel (= ${KERNEL_RELEASE}-${L4T_VERSION}-${AVT_RELEASE}) -Conflicts: nvidia-l4t-kernel-headers (<< 35.3-0) +Conflicts: nvidia-l4t-kernel-headers (<< 35.4-0) Section: kernel Priority: standard Homepage: http://developer.nvidia.com/jetson diff --git a/hardware/nvidia/platform/t19x/common/kernel-dts/t19x-common-modules/tegra194-cvm-e3360-0000-a00.dtsi b/hardware/nvidia/platform/t19x/common/kernel-dts/t19x-common-modules/tegra194-cvm-e3360-0000-a00.dtsi index bb27d73c3c..66e526f169 100644 --- a/hardware/nvidia/platform/t19x/common/kernel-dts/t19x-common-modules/tegra194-cvm-e3360-0000-a00.dtsi +++ b/hardware/nvidia/platform/t19x/common/kernel-dts/t19x-common-modules/tegra194-cvm-e3360-0000-a00.dtsi @@ -1,5 +1,5 @@ /* -* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. +* Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -32,6 +32,10 @@ #address-cells = <2>; #size-cells = <2>; + nvpmodel { + status = "okay"; + }; + pmc@c360000 { #if TEGRA_PMC_VERSION >= DT_VERSION_2 nvidia,invert-interrupt; diff --git a/hardware/nvidia/platform/t19x/common/kernel-dts/t19x-common-modules/tegra194-cvm-p2888-0000-a00.dtsi b/hardware/nvidia/platform/t19x/common/kernel-dts/t19x-common-modules/tegra194-cvm-p2888-0000-a00.dtsi index d1278c4ed0..6dd7c7f727 100644 --- a/hardware/nvidia/platform/t19x/common/kernel-dts/t19x-common-modules/tegra194-cvm-p2888-0000-a00.dtsi +++ b/hardware/nvidia/platform/t19x/common/kernel-dts/t19x-common-modules/tegra194-cvm-p2888-0000-a00.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -32,6 +32,10 @@ #address-cells = <2>; #size-cells = <2>; + nvpmodel { + status = "okay"; + }; + pmc@c360000 { #if TEGRA_PMC_VERSION >= DT_VERSION_2 nvidia,invert-interrupt; diff --git a/hardware/nvidia/platform/t19x/common/kernel-dts/t19x-common-platforms/tegra194-platforms-eqos.dtsi b/hardware/nvidia/platform/t19x/common/kernel-dts/t19x-common-platforms/tegra194-platforms-eqos.dtsi index 428ceb1c3c..3715f9aded 100644 --- a/hardware/nvidia/platform/t19x/common/kernel-dts/t19x-common-platforms/tegra194-platforms-eqos.dtsi +++ b/hardware/nvidia/platform/t19x/common/kernel-dts/t19x-common-platforms/tegra194-platforms-eqos.dtsi @@ -36,8 +36,8 @@ nvidia,mtl-queues = <0>; nvidia,rx-queue-prio = <0x2>; nvidia,tx-queue-prio = <0x0>; - /* 0=enable, 1=disable */ - nvidia,pause_frames = <0>; + /* 1=enable, 0=disable */ + nvidia,pause_frames = <1>; nvidia,phy-reset-gpio = <&tegra_main_gpio 
TEGRA194_MAIN_GPIO(G, 5) 0>; phy-mode = "rgmii-id"; phy-handle = <&phy>; diff --git a/hardware/nvidia/platform/t19x/jakku/kernel-dts/Makefile b/hardware/nvidia/platform/t19x/jakku/kernel-dts/Makefile index 6c857f62d1..62bc49b023 100644 --- a/hardware/nvidia/platform/t19x/jakku/kernel-dts/Makefile +++ b/hardware/nvidia/platform/t19x/jakku/kernel-dts/Makefile @@ -13,9 +13,11 @@ dtb-$(BUILD_ENABLE) += tegra194-p3668-all-p3509-0000.dtb dtb-$(BUILD_ENABLE) += tegra194-p3668-0000-p3509-0000.dtb dtb-$(BUILD_ENABLE) += tegra194-p3668-0000-p3509-0000-avt.dtb dtb-$(BUILD_ENABLE) += tegra194-p3668-0000-p3509-0000-auvidea-jnx30.dtb +dtb-$(BUILD_ENABLE) += tegra194-p3668-0000-p3509-0000-auvidea-jnx30d.dtb dtb-$(BUILD_ENABLE) += tegra194-p3668-0001-p3509-0000.dtb dtb-$(BUILD_ENABLE) += tegra194-p3668-0001-p3509-0000-avt.dtb dtb-$(BUILD_ENABLE) += tegra194-p3668-0001-p3509-0000-auvidea-jnx30.dtb +dtb-$(BUILD_ENABLE) += tegra194-p3668-0001-p3509-0000-auvidea-jnx30d.dtb dtb-$(CONFIG_ARCH_TEGRA_19x_SOC) += tegra194-p3668-all-p3509-0000-kexec.dtb dtbo-$(BUILD_ENABLE) += tegra194-p3668-all-p3509-0000-hdr40.dtbo dtbo-$(BUILD_ENABLE) += tegra194-p3668-all-p3509-0000-adafruit-sph0645lm4h.dtbo diff --git a/hardware/nvidia/platform/t19x/jakku/kernel-dts/common/tegra194-camera-jakku-avt-auvidea-jnx30d-38486.dtsi b/hardware/nvidia/platform/t19x/jakku/kernel-dts/common/tegra194-camera-jakku-avt-auvidea-jnx30d-38486.dtsi new file mode 100644 index 0000000000..c6752e69f2 --- /dev/null +++ b/hardware/nvidia/platform/t19x/jakku/kernel-dts/common/tegra194-camera-jakku-avt-auvidea-jnx30d-38486.dtsi @@ -0,0 +1,292 @@ +/* + * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +/ { + + tegra-capture-vi { + num-channels = <2>; + ports { + #address-cells = <1>; + #size-cells = <0>; + vi_port0: port@0 { + reg = <0>; + avt_csi2_vi_in0: endpoint { + port-index = <0>; + bus-width = <4>; + remote-endpoint = <&avt_csi2_csi_out0>; + }; + }; + vi_port1: port@1 { + reg = <1>; + avt_csi2_vi_in1: endpoint { + port-index = <4>; + bus-width = <4>; + remote-endpoint = <&avt_csi2_csi_out1>; + }; + }; + }; + }; + host1x@13e00000 { + nvcsi@15a00000 { + num-channels = <2>; + #address-cells = <1>; + #size-cells = <0>; + csi_chan0: channel@0 { + reg = <0>; + ports { + #address-cells = <1>; + #size-cells = <0>; + csi_chan0_port0: port@0 { + reg = <0>; + avt_csi2_csi_in0: endpoint@0 { + port-index = <0>; + bus-width = <4>; + remote-endpoint = <&avt_csi2_out0>; + }; + }; + csi_chan0_port1: port@1 { + reg = <1>; + avt_csi2_csi_out0: endpoint@1 { + remote-endpoint = <&avt_csi2_vi_in0>; + }; + }; + }; + }; + csi_chan1: channel@1 { + reg = <1>; + ports { + #address-cells = <1>; + #size-cells = <0>; + csi_chan1_port0: port@0 { + reg = <0>; + avt_csi2_csi_in1: endpoint@0 { + port-index = <4>; + bus-width = <4>; + remote-endpoint = <&avt_csi2_out1>; + }; + }; + csi_chan1_port1: port@1 { + reg = <1>; + avt_csi2_csi_out1: endpoint@1 { + remote-endpoint = <&avt_csi2_vi_in1>; + }; + }; + }; + }; + }; + }; + + i2c@3180000 { + avt_csi2@3c { + compatible = "alliedvision,avt_csi2"; + /* I2C device address */ + reg = <0x3c>; + + status = "okay"; + + /* V4L2 device node location */ + devnode = "video0"; + + mode0 { + num_lanes = "4"; + tegra_sinterface = "serial_a"; + discontinuous_clk = "no"; + 
cil_settletime = "0"; + embedded_metadata_height = "0"; + + /* not verified: */ + mclk_khz = "24000"; + phy_mode = "DPHY"; + dpcm_enable = "false"; + + active_w = "5488"; + active_h = "4112"; + pixel_t = "bayer_bggr"; + csi_pixel_bit_depth = "4"; + readout_orientation = "0"; + line_length = "5488"; + inherent_gain = "1"; + mclk_multiplier = "31.25"; + pix_clk_hz = "750000000"; + + gain_factor = "16"; + framerate_factor = "1000000"; + exposure_factor = "1000000"; + min_gain_val = "16"; /* 1.0 */ + max_gain_val = "256"; /* 16.0 */ + step_gain_val = "1"; /* 0.125 */ + min_hdr_ratio = "1"; + max_hdr_ratio = "64"; + min_framerate = "1500000"; /* 1.5 */ + max_framerate = "30000000"; /* 30 */ + step_framerate = "1"; + min_exp_time = "34"; /* us */ + max_exp_time = "550385"; /* us */ + step_exp_time = "1"; + }; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + avt_csi2_out0: endpoint { + port-index = <0>; + bus-width = <4>; + remote-endpoint = <&avt_csi2_csi_in0>; + }; + }; + }; + }; + }; + + i2c@c240000 { + avt_csi2@3c { + compatible = "alliedvision,avt_csi2"; + /* I2C device address */ + reg = <0x3c>; + + status = "okay"; + + /* V4L2 device node location */ + devnode = "video1"; + + mode0 { + num_lanes = "4"; + tegra_sinterface = "serial_c"; + discontinuous_clk = "no"; + cil_settletime = "0"; + embedded_metadata_height = "0"; + + /* not verified: */ + mclk_khz = "24000"; + phy_mode = "DPHY"; + dpcm_enable = "false"; + + active_w = "5488"; + active_h = "4112"; + pixel_t = "bayer_bggr"; + csi_pixel_bit_depth = "4"; + readout_orientation = "0"; + line_length = "5488"; + inherent_gain = "1"; + mclk_multiplier = "31.25"; + pix_clk_hz = "750000000"; + + gain_factor = "16"; + framerate_factor = "1000000"; + exposure_factor = "1000000"; + min_gain_val = "16"; /* 1.0 */ + max_gain_val = "256"; /* 16.0 */ + step_gain_val = "1"; /* 0.125 */ + min_hdr_ratio = "1"; + max_hdr_ratio = "64"; + min_framerate = "1500000"; /* 1.5 */ + max_framerate = 
"30000000"; /* 30 */ + step_framerate = "1"; + min_exp_time = "34"; /* us */ + max_exp_time = "550385"; /* us */ + step_exp_time = "1"; + }; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + avt_csi2_out1: endpoint { + port-index = <4>; + bus-width = <4>; + remote-endpoint = <&avt_csi2_csi_in1>; + }; + }; + }; + }; + }; + + tcp: tegra-camera-platform { + compatible = "nvidia, tegra-camera-platform"; + /** + * Physical settings to calculate max ISO BW + * + * num_csi_lanes = <>; + * Total number of CSI lanes when all cameras are active + * + * max_lane_speed = <>; + * Max lane speed in Kbit/s + * + * min_bits_per_pixel = <>; + * Min bits per pixel + * + * vi_peak_byte_per_pixel = <>; + * Max byte per pixel for the VI ISO case + * + * vi_bw_margin_pct = <>; + * Vi bandwidth margin in percentage + * + * max_pixel_rate = <>; + * Max pixel rate in Kpixel/s for the ISP ISO case + * + * isp_peak_byte_per_pixel = <>; + * Max byte per pixel for the ISP ISO case + * + * isp_bw_margin_pct = <>; + * Isp bandwidth margin in percentage + */ + num_csi_lanes = <8>; + max_lane_speed = <1500000>; + min_bits_per_pixel = <10>; + vi_peak_byte_per_pixel = <2>; + vi_bw_margin_pct = <25>; + max_pixel_rate = <240000>; + isp_peak_byte_per_pixel = <5>; + isp_bw_margin_pct = <25>; + + /** + * The general guideline for naming badge_info contains 3 parts, and is as follows, + * The first part is the camera_board_id for the module; if the module is in a FFD + * platform, then use the platform name for this part. + * The second part contains the position of the module, ex. "rear" or "front". + * The third part contains the last 6 characters of a part number which is found + * in the module's specsheet from the vendor. 
+ */ + modules { + cam_module0: module0 { + badge = "jakku_front_RBP194"; + position = "front"; + orientation = "1"; + cam_module0_drivernode0: drivernode0 { + pcl_id = "v4l2_sensor"; + devname = "imx219 9-0010"; + proc-device-tree = "/proc/device-tree/cam_i2cmux/i2c@0/rbpcv2_imx219_a@10"; + }; + }; + cam_module1: module1 { + badge = "jakku_rear_RBP194"; + position = "rear"; + orientation = "1"; + cam_module1_drivernode0: drivernode0 { + pcl_id = "v4l2_sensor"; + devname = "imx219 10-0010"; + proc-device-tree = "/proc/device-tree/cam_i2cmux/i2c@1/rbpcv2_imx219_c@10"; + }; + }; + }; + }; +}; + diff --git a/hardware/nvidia/platform/t19x/jakku/kernel-dts/common/tegra194-camera-jakku-avt-auvidea-jnx30d.dtsi b/hardware/nvidia/platform/t19x/jakku/kernel-dts/common/tegra194-camera-jakku-avt-auvidea-jnx30d.dtsi new file mode 100644 index 0000000000..58f8ce31ff --- /dev/null +++ b/hardware/nvidia/platform/t19x/jakku/kernel-dts/common/tegra194-camera-jakku-avt-auvidea-jnx30d.dtsi @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "tegra194-camera-avt-csi2.dtsi" + +#define CAM_I2C_MUX TEGRA194_AON_GPIO(CC, 3) + +/ { + cam_i2cmux { + compatible = "i2c-mux-gpio"; + #address-cells = <1>; + #size-cells = <0>; + i2c-parent = <&cam_i2c>; + mux-gpios = <&tegra_aon_gpio CAM_I2C_MUX GPIO_ACTIVE_HIGH>; + i2c@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + }; + i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; +}; + diff --git a/hardware/nvidia/platform/t19x/jakku/kernel-dts/common/tegra194-p3668-common.dtsi b/hardware/nvidia/platform/t19x/jakku/kernel-dts/common/tegra194-p3668-common.dtsi index ae19fcd3e5..9b8cef97d5 100644 --- a/hardware/nvidia/platform/t19x/jakku/kernel-dts/common/tegra194-p3668-common.dtsi +++ b/hardware/nvidia/platform/t19x/jakku/kernel-dts/common/tegra194-p3668-common.dtsi @@ -1,7 +1,7 @@ /* * Common include DTS file for CVM:P3668-0001 and CVB:P3449-0000 variants. * - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -614,6 +614,10 @@ }; }; + nvpmodel { + status = "okay"; + }; + soctherm-oc-event { status = "okay"; }; diff --git a/hardware/nvidia/platform/t19x/jakku/kernel-dts/tegra194-p3668-0000-p3509-0000-auvidea-jnx30d.dts b/hardware/nvidia/platform/t19x/jakku/kernel-dts/tegra194-p3668-0000-p3509-0000-auvidea-jnx30d.dts new file mode 100644 index 0000000000..e3cd72c686 --- /dev/null +++ b/hardware/nvidia/platform/t19x/jakku/kernel-dts/tegra194-p3668-0000-p3509-0000-auvidea-jnx30d.dts @@ -0,0 +1,27 @@ +/* + * Top level DTS file for CVM:P3668-0001 and CVB:P3509-0000. + * + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +/dts-v1/; +#include "common/tegra194-p3668-common.dtsi" +#include "common/tegra194-p3509-0000-a00.dtsi" +#include "common/tegra194-camera-jakku-avt-auvidea-jnx30d-38486.dtsi" +#include "common/tegra194-auvidea-jnx30-sdcard.dtsi" + +/ { + nvidia,dtsfilename = __FILE__; + nvidia,dtbbuildtime = __DATE__, __TIME__; + + compatible = "nvidia,p3449-0000+p3668-0000", "nvidia,p3509-0000+p3668-0000", "nvidia,tegra194"; +}; diff --git a/hardware/nvidia/platform/t19x/jakku/kernel-dts/tegra194-p3668-0001-p3509-0000-auvidea-jnx30d.dts b/hardware/nvidia/platform/t19x/jakku/kernel-dts/tegra194-p3668-0001-p3509-0000-auvidea-jnx30d.dts new file mode 100644 index 0000000000..8d5359325d --- /dev/null +++ b/hardware/nvidia/platform/t19x/jakku/kernel-dts/tegra194-p3668-0001-p3509-0000-auvidea-jnx30d.dts @@ -0,0 +1,27 @@ +/* + * Top level DTS file for CVM:P3668-0001 and CVB:P3509-0000. + * + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +/dts-v1/; +#include "common/tegra194-p3668-common.dtsi" +#include "common/tegra194-p3509-0000-a00.dtsi" +#include "common/tegra194-camera-jakku-avt-auvidea-jnx30d-38486.dtsi" +#include "common/tegra194-auvidea-jnx30-sdcard.dtsi" + +/ { + nvidia,dtsfilename = __FILE__; + nvidia,dtbbuildtime = __DATE__, __TIME__; + + compatible = "nvidia,p3449-0000+p3668-0001", "nvidia,p3509-0000+p3668-0001", "nvidia,tegra194"; +}; diff --git a/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-cvm/tegra234-cpufreq-pair-cooling.dtsi b/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-cvm/tegra234-cpufreq-pair-cooling.dtsi new file mode 100644 index 0000000000..af6177c18f --- /dev/null +++ b/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-cvm/tegra234-cpufreq-pair-cooling.dtsi @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +/ { + cpus { + cpu@2 { + #cooling-cells = <2>; + }; + cpu@6 { + #cooling-cells = <2>; + }; + cpu@10 { + #cooling-cells = <2>; + }; + }; + + thermal-zones { + CPU-therm { + cooling-maps { + map1 { + trip = <&cpu_sw_throttle>; + cooling-device = <&cl0_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl1_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl2_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + GPU-therm { + cooling-maps { + map1 { + trip = <&gpu_sw_throttle>; + cooling-device = <&cl0_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl1_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl2_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + CV0-therm { + cooling-maps { + map1 { + trip = <&cv0_sw_throttle>; + cooling-device = <&cl0_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl1_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl2_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + CV1-therm { + cooling-maps { + map1 { + trip = <&cv1_sw_throttle>; + cooling-device = <&cl0_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl1_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl2_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + CV2-therm { + cooling-maps { + map1 { + trip = <&cv2_sw_throttle>; + cooling-device = <&cl0_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl1_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl2_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + SOC0-therm { + cooling-maps { + map1 { + trip = <&soc0_sw_throttle>; + cooling-device = <&cl0_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl1_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl2_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + SOC1-therm { + cooling-maps { + map1 { + trip = <&soc1_sw_throttle>; + cooling-device = <&cl0_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl1_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl2_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + SOC2-therm { + cooling-maps { + map1 { + trip = <&soc2_sw_throttle>; + cooling-device = <&cl0_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl1_2 
THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cl2_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + }; +}; diff --git a/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-cvm/tegra234-cvm-p3701.dtsi b/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-cvm/tegra234-cvm-p3701.dtsi index 82e79e77b1..520cafd36c 100644 --- a/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-cvm/tegra234-cvm-p3701.dtsi +++ b/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-cvm/tegra234-cvm-p3701.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -208,6 +208,10 @@ }; }; + nvpmodel { + status = "okay"; + }; + soctherm-oc-event { status = "okay"; }; diff --git a/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-cvm/tegra234-cvm-p3767.dtsi b/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-cvm/tegra234-cvm-p3767.dtsi index 26aa8cdb5a..a89a3c91f8 100644 --- a/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-cvm/tegra234-cvm-p3767.dtsi +++ b/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-cvm/tegra234-cvm-p3767.dtsi @@ -184,6 +184,7 @@ status = "okay"; }; linux,cma { + size = <0x0 0x10000000>; /* 256 MB */ status = "okay"; }; vpr-carveout { @@ -191,6 +192,11 @@ }; }; + tegra-carveouts { + memory-region = <&vpr>; + status = "okay"; + }; + tegra-cache { status = "okay"; }; @@ -268,6 +274,10 @@ }; }; + nvpmodel { + status = "okay"; + }; + soctherm-oc-event { status = "okay"; }; diff --git a/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-cvm/tegra234-p3767-pcie-max-speed-gen3.dtsi b/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-cvm/tegra234-p3767-pcie-max-speed-gen3.dtsi new file mode 100644 index 0000000000..3783f38738 --- /dev/null 
+++ b/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-cvm/tegra234-p3767-pcie-max-speed-gen3.dtsi @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +/ { + /* C1 X1 */ + pcie@14100000 { + max-link-speed = <0x3>; + }; + + /* C4 X4 */ + pcie@14160000 { + max-link-speed = <0x3>; + }; + + /* C7 X8*/ + pcie@141e0000 { + max-link-speed = <0x3>; + }; + + /* C8 X2 */ + pcie@140a0000 { + max-link-speed = <0x3>; + }; +}; diff --git a/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-modules/tegra234-cvm-e2421-1098.dtsi b/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-modules/tegra234-cvm-e2421-1098.dtsi index fea7b2a1e9..3cdec21d2e 100644 --- a/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-modules/tegra234-cvm-e2421-1098.dtsi +++ b/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-modules/tegra234-cvm-e2421-1098.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -19,4 +19,8 @@ pmc@c360000 { nvidia,invert-interrupt; }; + + nvpmodel { + status = "okay"; + }; }; diff --git a/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-modules/tegra234-cvm-e2421-1099.dtsi b/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-modules/tegra234-cvm-e2421-1099.dtsi index ebf8c2d7a5..41336ed4d2 100644 --- a/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-modules/tegra234-cvm-e2421-1099.dtsi +++ b/hardware/nvidia/platform/t23x/common/kernel-dts/t234-common-modules/tegra234-cvm-e2421-1099.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -29,4 +29,8 @@ dce@d800000 { status = "okay"; }; + + nvpmodel { + status = "okay"; + }; }; diff --git a/hardware/nvidia/platform/t23x/concord/kernel-dts/cvb/tegra234-ethernet-3737-0000.dtsi b/hardware/nvidia/platform/t23x/concord/kernel-dts/cvb/tegra234-ethernet-3737-0000.dtsi index 50a768c554..35ca336469 100644 --- a/hardware/nvidia/platform/t23x/concord/kernel-dts/cvb/tegra234-ethernet-3737-0000.dtsi +++ b/hardware/nvidia/platform/t23x/concord/kernel-dts/cvb/tegra234-ethernet-3737-0000.dtsi @@ -22,8 +22,8 @@ status = "okay"; nvidia,mac-addr-idx = <0>; nvidia,max-platform-mtu = <16383>; - /* 0=enable, 1=disable */ - nvidia,pause_frames = <0>; + /* 1=enable, 0=disable */ + nvidia,pause_frames = <1>; phy-handle = <&mgbe0_aqr113c_phy>; phy-mode = "10gbase-r"; /* 0:XFI 10G, 1:XFI 5G, 2:USXGMII 10G, 3:USXGMII 5G */ diff --git a/hardware/nvidia/platform/t23x/concord/kernel-dts/cvb/tegra234-pwm-fan.dtsi b/hardware/nvidia/platform/t23x/concord/kernel-dts/cvb/tegra234-pwm-fan.dtsi index 
1a7cf0bc89..5501c3b639 100644 --- a/hardware/nvidia/platform/t23x/concord/kernel-dts/cvb/tegra234-pwm-fan.dtsi +++ b/hardware/nvidia/platform/t23x/concord/kernel-dts/cvb/tegra234-pwm-fan.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -13,54 +13,13 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#include -#include / { - pwm_fan_shared_data: pfsd { - num_resources = <0>; - secret = <47>; - active_steps = <10>; - active_rpm = <0 1000 2000 3000 4000 5000 6000 7000 10000 11000>; - rpm_diff_tolerance = <2>; - active_rru = <40 2 1 1 1 1 1 1 1 1>; - active_rrd = <40 2 1 1 1 1 1 1 1 1>; - state_cap_lookup = <2 2 2 2 3 3 3 4 4 4>; - pwm_period = <45334>; - pwm_id = <3>; - pwm_polarity = ; - suspend_state = <1>; - step_time = <100>; /* mesecs */ - state_cap = <7>; - active_pwm_max = <256>; - tach_period = <1000>; - pwm_gpio = <&tegra_main_gpio TEGRA234_MAIN_GPIO(Q, 2) GPIO_ACTIVE_LOW>; - }; - pwm-fan { compatible = "pwm-fan"; status = "okay"; #pwm-cells = <1>; pwms = <&tegra_pwm3 0 45334>; - shared_data = <&pwm_fan_shared_data>; - profiles { - default = "quiet"; - quiet { - state_cap = <4>; -#if TEGRA_PWM_FAN_DT_VERSION == DT_VERSION_2 - cooling-levels = <255 178 135 95 0 0 0 0 0 0>; -#else - active_pwm = <0 77 120 160 255 255 255 255 255 255>; -#endif - }; - cool { - state_cap = <4>; -#if TEGRA_PWM_FAN_DT_VERSION == DT_VERSION_2 - cooling-levels = <255 178 135 95 0 0 0 0 0 0>; -#else - active_pwm = <0 77 120 160 255 255 255 255 255 255>; -#endif - }; - }; + cooling-levels = <128 135 178 200 255 255 255 255 255 255>; }; }; diff --git a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3701-0000-as-p3767-0001-p3737-0000.dts 
b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3701-0000-as-p3767-0001-p3737-0000.dts index 077509df54..9646111820 100644 --- a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3701-0000-as-p3767-0001-p3737-0000.dts +++ b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3701-0000-as-p3767-0001-p3737-0000.dts @@ -1,7 +1,7 @@ /* * Top level DTS file for CVM:P3701-0000 and CVB:P3737-0000. * - * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -101,4 +101,10 @@ /delete-node/ cpu@10; /delete-node/ cpu@11; }; + + host1x@13e00000 { + nvdla1@158c0000 { + status = "disabled"; + }; + }; }; diff --git a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3701-0000-p3737-0000-kexec.dts b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3701-0000-p3737-0000-kexec.dts index cdaf1c2fa0..9e97983845 100644 --- a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3701-0000-p3737-0000-kexec.dts +++ b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3701-0000-p3737-0000-kexec.dts @@ -21,24 +21,81 @@ grid-of-semaphores { status = "disabled"; }; - }; - aconnect@2a41000 { - adsp@2993000 { + ramoops_carveout { + status = "disabled"; + }; + + fsi-carveout { status = "disabled"; }; + + linux,cma { + status = "disabled"; + }; + }; + + tegra-carveouts { + status = "disabled"; + }; + + pcie@14100000 { + status = "disabled"; + }; + + pcie@14160000 { + status = "disabled"; + }; + + pcie@141a0000 { + status = "disabled"; + }; + + sound { + status = "disabled"; + }; + + actmon@d230000 { + status = "disabled"; + }; + + aon@c000000 { + status = "disabled"; + }; + + ethernet@6810000 { + status = "disabled"; + }; + + combined-uart { + status = "disabled"; + }; + + sdhci@3400000 { + status = "disabled"; + }; + 
+ aconnect@2a41000 { + status = "disabled"; + }; + + rtcpu@bc00000 { + status = "disabled"; + }; + + tegra-hsp@c150000 { + status = "disabled"; + }; + + xudc@3550000 { + status = "disabled"; + }; + + cbb-fabric@1300000 { + status = "disabled"; }; chosen { - linux,uefi-mmap-desc-ver = <0x01>; - linux,uefi-mmap-desc-size = <0x30>; - linux,uefi-mmap-size = <0x1320>; - linux,uefi-mmap-start = <0x08 0x5477f018>; - linux,uefi-system-table = <0x08 0x7bc90018>; - linux,initrd-end = <0x08 0x52841122>; - linux,initrd-start = <0x08 0x52040000>; - nvidia,ether-mac0 = "48:B0:2D:5D:16:18"; - nvidia,ether-mac = "48:B0:2D:5D:16:18"; board-has-eeprom; }; }; diff --git a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3701-overlay.dts b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3701-overlay.dts index 39ae84a51b..d4d574e2a8 100644 --- a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3701-overlay.dts +++ b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3701-overlay.dts @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -25,7 +25,7 @@ fragment@0 { target-path = "/"; board_config { - ids = "3701-0005-*"; + ids = "3701-0005-*","3701-0008-*"; }; __overlay__ { reserved-memory { diff --git a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-dual-hawk-ar0234-e3653-overlay.dts b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-dual-hawk-ar0234-e3653-overlay.dts index 9b28144a0b..c72e4bc2c5 100644 --- a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-dual-hawk-ar0234-e3653-overlay.dts +++ b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-dual-hawk-ar0234-e3653-overlay.dts @@ -2,7 +2,7 @@ /* * Jetson Device-tree overlay for Camera Hawk & Owl on t23x platforms * - * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023 NVIDIA CORPORATION. All rights reserved. * */ @@ -20,7 +20,7 @@ / { overlay-name = "Jetson Camera e3653-dual-Hawk module"; jetson-header-name = "Jetson AGX CSI Connector"; - compatible = "nvidia,p3737-0000+p3701-0000"; + compatible = "nvidia,p3737-0000+p3701-0000", "nvidia,p3737-0000+p3701-0004", "nvidia,p3737-0000+p3701-0005"; /* VI number of channels */ fragment@0 { diff --git a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-dual-imx274-overlay.dts b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-dual-imx274-overlay.dts index 11a241d9ad..b51d0e85d6 100644 --- a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-dual-imx274-overlay.dts +++ b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-dual-imx274-overlay.dts @@ -2,7 +2,7 @@ /* * Jetson Device-tree overlay for Camera Dual-IMX274 on t23x platforms * - * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023 NVIDIA CORPORATION. 
All rights reserved. * */ @@ -13,7 +13,7 @@ / { overlay-name = "Jetson Camera Dual-IMX274"; jetson-header-name = "Jetson AGX CSI Connector"; - compatible = "nvidia,p3737-0000+p3701-0000"; + compatible = "nvidia,p3737-0000+p3701-0000", "nvidia,p3737-0000+p3701-0004", "nvidia,p3737-0000+p3701-0005"; fragment@0 { target-path = "/i2c@3180000/tca9546@70/i2c@0/imx274_a@1a"; diff --git a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-e3331-overlay.dts b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-e3331-overlay.dts index d8788dea9e..72f4e84018 100644 --- a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-e3331-overlay.dts +++ b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-e3331-overlay.dts @@ -2,7 +2,7 @@ /* * Jetson Device-tree overlay for Camera E3331 on t23x platforms * - * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023 NVIDIA CORPORATION. All rights reserved. * */ @@ -20,7 +20,7 @@ / { overlay-name = "Jetson Camera E3331 module"; jetson-header-name = "Jetson AGX CSI Connector"; - compatible = "nvidia,p3737-0000+p3701-0000"; + compatible = "nvidia,p3737-0000+p3701-0000", "nvidia,p3737-0000+p3701-0004", "nvidia,p3737-0000+p3701-0005"; /* E3331 camera board */ diff --git a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-e3333-overlay.dts b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-e3333-overlay.dts index df2309ef05..35053968cf 100644 --- a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-e3333-overlay.dts +++ b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-e3333-overlay.dts @@ -2,7 +2,7 @@ /* * Jetson Device-tree overlay for Camera E3333 module on t23x platforms * - * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023 NVIDIA CORPORATION. All rights reserved. 
* */ @@ -20,7 +20,7 @@ / { overlay-name = "Jetson Camera E3333 module"; jetson-header-name = "Jetson AGX CSI Connector"; - compatible = "nvidia,p3737-0000+p3701-0000"; + compatible = "nvidia,p3737-0000+p3701-0000" , "nvidia,p3737-0000+p3701-0004", "nvidia,p3737-0000+p3701-0005"; /* Use either odm-data or eeprom-ids for sensor detection */ diff --git a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-hawk-owl-overlay.dts b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-hawk-owl-overlay.dts index e9a6de69d1..208627119c 100644 --- a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-hawk-owl-overlay.dts +++ b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-hawk-owl-overlay.dts @@ -2,7 +2,7 @@ /* * Jetson Device-tree overlay for Camera Hawk & Owl on t23x platforms * - * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023 NVIDIA CORPORATION. All rights reserved. * */ @@ -13,7 +13,7 @@ / { overlay-name = "Jetson Camera Hawk-Owl p3762 module"; jetson-header-name = "Jetson AGX CSI Connector"; - compatible = "nvidia,p3737-0000+p3701-0000"; + compatible = "nvidia,p3737-0000+p3701-0000" , "nvidia,p3737-0000+p3701-0004", "nvidia,p3737-0000+p3701-0005"; /* VI number of channels */ fragment@0 { diff --git a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-imx185-overlay.dts b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-imx185-overlay.dts index c3216a2cbe..f32c7621d3 100644 --- a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-imx185-overlay.dts +++ b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-imx185-overlay.dts @@ -2,7 +2,7 @@ /* * Jetson Device-tree overlay for Camera IMX185 on t23x platforms * - * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023 NVIDIA CORPORATION. All rights reserved. 
* */ @@ -12,7 +12,7 @@ / { overlay-name = "Jetson Camera IMX185"; jetson-header-name = "Jetson AGX CSI Connector"; - compatible = "nvidia,p3737-0000+p3701-0000"; + compatible = "nvidia,p3737-0000+p3701-0000" , "nvidia,p3737-0000+p3701-0004", "nvidia,p3737-0000+p3701-0005"; /* IMX185 module using ODM-DATA */ fragment@0 { diff --git a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-imx390-overlay.dts b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-imx390-overlay.dts index 3fe62ae483..0730a3936b 100644 --- a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-imx390-overlay.dts +++ b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-camera-imx390-overlay.dts @@ -2,7 +2,7 @@ /* * Jetson Device-tree overlay for Camera IMX390 on t23x platforms * - * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023 NVIDIA CORPORATION. All rights reserved. * */ @@ -19,7 +19,7 @@ / { overlay-name = "Jetson Camera IMX390"; jetson-header-name = "Jetson AGX CSI Connector"; - compatible = "nvidia,p3737-0000+p3701-0000"; + compatible = "nvidia,p3737-0000+p3701-0000", "nvidia,p3737-0000+p3701-0004", "nvidia,p3737-0000+p3701-0005"; /* IMX390 module using ids */ fragment@0 { diff --git a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-overlay.dts b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-overlay.dts index 5cc0eb6b3f..7c4cf429db 100644 --- a/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-overlay.dts +++ b/hardware/nvidia/platform/t23x/concord/kernel-dts/tegra234-p3737-overlay.dts @@ -152,4 +152,16 @@ }; }; }; + + fragment@8 { + target-path = "/"; + board_config { + odm-data = "disable-tegra-wdt"; + }; + __overlay__ { + watchdog@2190000 { + status = "disabled"; + }; + }; + }; }; diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/Makefile b/hardware/nvidia/platform/t23x/p3768/kernel-dts/Makefile index 
8ceb482f53..f306119fe8 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/Makefile +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/Makefile @@ -39,6 +39,7 @@ dtbo-$(BUILD_ENABLE) += tegra234-p3767-camera-p3768-imx477-imx219.dtbo dtbo-$(BUILD_ENABLE) += tegra234-p3767-camera-p3768-imx219-dual.dtbo dtbo-$(BUILD_ENABLE) += tegra234-p3767-camera-p3768-imx477-dual.dtbo dtbo-$(BUILD_ENABLE) += tegra234-p3767-camera-p3768-imx477-dual-4lane.dtbo +dtbo-$(BUILD_ENABLE) += tegra234-p3767-overlay.dtbo ifneq ($(dtb-y),) dtb-y := $(addprefix $(makefile-path)/,$(dtb-y)) diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/cvb/tegra234-camera-avt-csi2.dtsi b/hardware/nvidia/platform/t23x/p3768/kernel-dts/cvb/tegra234-camera-avt-csi2.dtsi index 7caa0d2f78..597dd140f4 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/cvb/tegra234-camera-avt-csi2.dtsi +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/cvb/tegra234-camera-avt-csi2.dtsi @@ -25,7 +25,7 @@ vi_port0: port@0 { reg = <0>; avt_csi2_vi_in0: endpoint { - port-index = <0>; + port-index = <1>; bus-width = <2>; remote-endpoint = <&avt_csi2_csi_out0>; }; @@ -34,7 +34,7 @@ reg = <1>; avt_csi2_vi_in1: endpoint { port-index = <2>; - bus-width = <2>; + bus-width = <4>; remote-endpoint = <&avt_csi2_csi_out1>; }; }; @@ -53,7 +53,7 @@ csi_chan0_port0: port@0 { reg = <0>; avt_csi2_csi_in0: endpoint@0 { - port-index = <0>; + port-index = <1>; bus-width = <2>; remote-endpoint = <&avt_csi2_out0>; }; @@ -75,7 +75,7 @@ reg = <0>; avt_csi2_csi_in1: endpoint@2 { port-index = <2>; - bus-width = <2>; + bus-width = <4>; remote-endpoint = <&avt_csi2_out1>; }; }; @@ -105,7 +105,7 @@ mode0 { num_lanes = "2"; - tegra_sinterface = "serial_a"; + tegra_sinterface = "serial_b"; discontinuous_clk = "no"; cil_settletime = "0"; embedded_metadata_height = "0"; @@ -148,7 +148,7 @@ port@0 { reg = <0>; avt_csi2_out0: endpoint { - port-index = <0>; + port-index = <1>; bus-width = <2>; remote-endpoint = <&avt_csi2_csi_in0>; }; @@ 
-169,7 +169,7 @@ devnode = "video1"; mode0 { - num_lanes = "2"; + num_lanes = "4"; tegra_sinterface = "serial_c"; discontinuous_clk = "no"; cil_settletime = "0"; @@ -215,7 +215,7 @@ avt_csi2_out1: endpoint { status = "okay"; port-index = <2>; - bus-width = <2>; + bus-width = <4>; remote-endpoint = <&avt_csi2_csi_in1>; }; }; diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/cvb/tegra234-p3768-0000-a0.dtsi b/hardware/nvidia/platform/t23x/p3768/kernel-dts/cvb/tegra234-p3768-0000-a0.dtsi index 01f41c8d08..321fcfd63a 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/cvb/tegra234-p3768-0000-a0.dtsi +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/cvb/tegra234-p3768-0000-a0.dtsi @@ -18,6 +18,7 @@ #include "tegra234-p3768-audio.dtsi" #include "tegra234-p3768-camera-rbpcv3-imx477.dtsi" #include "tegra234-p3768-camera-rbpcv2-imx219.dtsi" +#include "tegra234-p3768-camera-avt-csi2.dtsi" / { gpio-keys { diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-dcb-p3767-0000-hdmi.dtsi b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-dcb-p3767-0000-hdmi.dtsi index 0ebd3e3ecb..d3573266e3 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-dcb-p3767-0000-hdmi.dtsi +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-dcb-p3767-0000-hdmi.dtsi @@ -1,16 +1,8 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ + * SPDX-License-Identifier: GPL-2.0 +*/ / { display@13800000 { @@ -18,7 +10,7 @@ 55 aa 16 00 00 37 34 30 30 e9 4c 19 77 cc 56 49 44 45 4f 20 0d 00 00 00 70 01 00 00 00 00 49 42 4d 20 56 47 41 20 43 6f 6d 70 61 74 69 62 6c 65 - 01 00 00 00 10 00 82 18 30 34 2f 32 30 2f 32 32 + 01 00 00 00 10 00 82 18 30 33 2f 31 36 2f 32 33 00 00 00 00 00 00 00 00 21 18 50 00 e1 2b 00 00 50 4d 49 44 00 00 00 00 00 00 00 a0 00 b0 00 b8 00 c0 00 0e 47 41 31 30 42 20 56 47 41 20 42 49 @@ -29,7 +21,7 @@ 00 00 00 00 00 56 65 72 73 69 6f 6e 20 39 34 2e 30 42 2e 30 30 2e 30 30 2e 32 31 20 0d 0a 00 43 6f 70 79 72 69 67 68 74 20 28 43 29 20 31 39 39 - 36 2d 32 30 32 32 20 4e 56 49 44 49 41 20 43 6f + 36 2d 32 30 32 33 20 4e 56 49 44 49 41 20 43 6f 72 70 2e 0d 0a 00 00 00 ff ff 00 00 00 00 ff ff 47 50 55 20 42 6f 61 72 64 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 @@ -54,41 +46,41 @@ 00 00 00 00 00 00 00 00 00 00 00 00 0b 94 21 00 00 00 00 00 a8 07 00 00 00 00 00 00 00 00 02 00 5c 5c 2e 02 00 00 42 02 04 00 10 00 00 00 00 e9 - 0e 00 00 00 00 00 00 39 44 00 00 e7 2d 00 00 00 - 00 00 00 00 00 00 00 00 00 00 00 92 30 00 00 e5 - 44 00 00 23 45 00 00 4a 45 00 00 00 00 00 00 f6 + 0e 00 00 00 00 00 00 3f 44 00 00 e7 2d 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 92 30 00 00 eb + 44 00 00 29 45 00 00 50 45 00 00 00 00 00 00 f6 04 00 00 00 00 fa 04 00 00 66 08 fa 04 16 2b 66 08 18 2b a2 04 ef 09 04 22 d4 09 c2 21 18 2b 90 - 00 9b 22 01 68 08 56 09 f4 43 00 00 fe 43 00 00 - f7 0f 00 00 f0 21 00 00 fc 21 00 00 54 4a 00 00 + 00 9b 22 01 68 08 56 09 fa 43 00 00 04 44 00 00 + f7 0f 00 00 f0 21 00 00 fc 21 00 00 30 4b 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d5 33 00 00 bb 36 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 b3 3c 00 00 00 00 00 00 ed 3c 00 00 12 43 00 00 00 00 00 00 - 00 00 00 00 df 33 00 00 32 3d 00 00 a0 43 00 00 - ad 36 00 00 00 00 00 00 00 00 00 00 c2 43 00 00 + 00 00 00 00 df 33 00 00 32 3d 00 00 a6 43 00 00 + ad 36 00 00 00 
00 00 00 00 00 00 00 c8 43 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 61 0b 00 00 dd 0a 00 00 77 0b 00 00 11 3c 00 00 17 3c 00 00 1c 3c 00 00 20 3c 00 00 2a 3c 00 00 31 3c 00 00 3f 3c 00 00 81 3c 00 00 - 00 00 00 00 00 00 00 00 96 3c 00 00 f0 45 00 00 - 96 47 00 00 0b 48 00 00 91 49 00 00 80 4b 00 00 - bc 4b 00 00 e6 49 00 00 9c 3c 00 00 79 3c 00 00 - 00 00 00 00 00 00 00 00 00 00 00 00 ec 4d 00 00 + 00 00 00 00 00 00 00 00 96 3c 00 00 f6 45 00 00 + 9c 47 00 00 11 48 00 00 63 4a 00 00 5c 4c 00 00 + 98 4c 00 00 c2 4a 00 00 9c 3c 00 00 79 3c 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 c8 4e 00 00 a0 3c 00 00 a9 3c 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 64 00 50 b5 00 19 cf 00 28 91 0e 14 a5 0e 23 00 01 23 23 01 14 00 00 00 0c 11 00 00 00 00 83 17 00 00 c8 0e 01 00 00 0d - 0e df 0c 00 00 00 00 01 01 00 00 00 00 af 1d 31 - 4e 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - b2 2d 00 00 00 00 0b 94 21 00 00 f1 8d dc 01 f0 - 03 00 00 30 34 2f 32 30 2f 32 32 00 00 00 00 00 + 0e df 0c 00 00 00 00 01 01 00 00 00 00 af 1d 0d + 4f 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + b2 2d 00 00 00 00 0b 94 21 00 00 66 60 f1 01 f0 + 03 00 00 30 33 2f 31 36 2f 32 33 00 00 00 00 00 00 00 00 00 00 00 00 21 01 10 00 00 00 80 01 00 00 00 00 00 30 30 30 30 30 30 30 30 30 30 30 30 - 00 00 00 00 00 00 00 00 03 42 00 00 b6 57 d9 72 - c8 27 46 66 aa 5e 5e 60 8f 8b 97 1d 54 33 00 00 - 00 00 00 00 c9 4c 00 00 00 00 00 00 00 00 97 4e + 00 00 00 00 00 00 00 00 03 42 00 00 1c 06 99 a6 + b9 51 42 01 b8 8b e9 f1 13 c6 1b 3f 54 33 00 00 + 00 00 00 00 a5 4d 00 00 00 00 00 00 00 00 73 4f 00 00 01 00 10 00 bf 09 30 00 02 00 94 22 00 00 00 00 01 00 44 00 6b 09 00 00 a2 04 00 00 56 09 00 00 fa 04 00 00 00 00 00 00 66 08 00 00 78 08 @@ -272,10 +264,10 @@ 28 ff 01 1f 0f 95 01 95 01 d0 07 a0 0f 1b 00 1b 00 0f 0f 32 ff 01 3f 10 04 02 06 00 00 00 07 00 07 00 07 00 07 00 07 10 05 04 10 04 0f 0f 0f 0f - 2f 2f 2f 2f 1c 1c 1c 1c 0f 
46 40 00 0f 0f 0f 0f - 2f 2f 2f 2f 1d 1d 1d 1d 0f 46 40 00 0f 0f 0f 0f - 2f 2f 2f 2f 1e 1e 1e 1e 0f 46 40 00 0f 0f 0f 0f - 2f 2f 2f 2f 1f 1f 1f 1f 0f 46 40 00 0f 0f 0f 0f + 2f 2f 2f 2f 1c 1c 1c 1c 0f 46 40 00 0e 0e 0e 0e + 2d 2d 2d 2d 13 13 13 13 0f 46 40 00 0e 0e 0e 0e + 2c 2c 2c 2c 15 15 15 15 0f 46 40 00 0e 0e 0e 0e + 2b 2b 2b 2b 17 17 17 17 0f 46 40 00 0f 0f 0f 0f 2d 2d 2d 2d 19 19 19 19 0f 46 40 00 0f 0f 0f 0f 2c 2c 2c 2c 1b 1b 1b 1b 0f 46 40 00 0f 0f 0f 0f 2b 2b 2b 2b 1d 1d 1d 1d 0f 46 40 00 0f 0f 0f 0f @@ -296,13 +288,13 @@ 05 00 00 00 00 00 00 00 00 88 58 24 00 00 00 00 00 75 40 00 00 00 00 0a 05 00 06 00 00 00 00 00 38 3d 3e 3f 3a 3f 3f 3f 3f 05 05 05 05 0a 0a 0a - 0a 00 00 00 00 88 58 24 00 00 00 00 00 65 19 00 + 0a 00 00 00 00 88 58 24 00 aa aa 00 00 65 19 00 00 00 00 0a 05 00 06 00 00 00 00 00 48 3a 3a 3a 3a 3a 3a 3a 3a 00 00 00 00 00 00 00 00 00 00 00 - 00 f8 5a 24 00 00 00 00 00 00 00 00 00 00 00 0a + 00 f8 5a 24 00 aa aa 00 00 00 00 00 00 00 00 0a 0a 00 06 00 00 00 00 00 58 3a 3a 3a 3a 3a 3a 3a 3a 00 00 00 00 00 00 00 00 00 00 00 00 f8 5a 24 - 00 00 00 00 00 03 00 00 01 0a 05 0f 46 40 00 00 + 00 aa aa 00 00 03 00 00 01 0a 05 0f 46 40 00 00 03 00 44 06 00 00 01 0a 08 0f 46 40 00 00 03 00 44 08 00 00 01 0a 05 0f 46 40 00 00 03 00 44 0a 00 00 01 0a 05 0f 46 40 00 00 03 00 44 0c 00 00 @@ -409,7 +401,7 @@ 00 2b 00 71 6e 00 23 61 40 ff ff 80 fc 00 00 2f 00 71 41 23 10 08 25 19 cb bd dc 4e 78 08 00 00 00 00 00 00 67 19 ec 19 c1 00 00 00 00 00 00 00 - 00 00 00 00 00 02 03 80 01 10 00 62 04 0e 03 80 + 00 00 00 00 00 02 03 80 01 10 00 62 04 0e 01 80 01 10 00 02 04 0e 11 02 01 10 00 02 00 2e 32 03 02 10 00 02 00 fe 40 04 00 00 00 00 00 0f 00 00 00 00 00 00 00 0f 00 00 00 00 00 00 00 0f 00 00 diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-as-p3767-0001-p3509-a02.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-as-p3767-0001-p3509-a02.dts index 9eb9b036b9..8b852d00a4 100644 --- 
a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-as-p3767-0001-p3509-a02.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-as-p3767-0001-p3509-a02.dts @@ -30,4 +30,10 @@ "nvidia,p3767-0000-as-p3767-0001", "nvidia,tegra234", "nvidia,tegra23x"; model = "NVIDIA Orin NX 16GB as Orin NX 8GB"; + + host1x@13e00000 { + nvdla1@158c0000 { + status = "disabled"; + }; + }; }; diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-as-p3767-0001-p3768-0000-a0.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-as-p3767-0001-p3768-0000-a0.dts index d074dfa18f..a726d39c2a 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-as-p3767-0001-p3768-0000-a0.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-as-p3767-0001-p3768-0000-a0.dts @@ -30,4 +30,10 @@ "nvidia,p3767-0000-as-p3767-0001", "nvidia,tegra234", "nvidia,tegra23x"; model = "NVIDIA Orin NX 16GB as Orin NX 8GB"; + + host1x@13e00000 { + nvdla1@158c0000 { + status = "disabled"; + }; + }; }; diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-common-hdr40.dtsi b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-common-hdr40.dtsi index 706f48af34..70033d67a9 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-common-hdr40.dtsi +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-common-hdr40.dtsi @@ -67,6 +67,13 @@ nvidia,tristate = ; nvidia,enable-input = ; }; + hdr40-pin15 { + nvidia,pins = "soc_gpio39_pn1"; + nvidia,function = "gp"; + nvidia,pin-group = "pwm1"; + nvidia,tristate = ; + nvidia,enable-input = ; + }; hdr40-pin16 { nvidia,pins = "spi3_cs1_py4"; nvidia,function = "spi3"; @@ -129,6 +136,40 @@ hdr40-pin28 { nvidia,pins = "gen2_i2c_scl_pcc7"; }; + hdr40-pin29 { + nvidia,pins = "soc_gpio32_pq5"; + nvidia,function = "extperiph3"; + nvidia,pin-group = "extperiph3_clk"; + nvidia,pull = ; 
+ nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,io-high-voltage = ; + nvidia,lpdr = ; + }; + hdr40-pin31 { + nvidia,pins = "soc_gpio33_pq6"; + nvidia,function = "extperiph4"; + nvidia,pin-group = "extperiph4_clk"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,io-high-voltage = ; + nvidia,lpdr = ; + }; + hdr40-pin32 { + nvidia,pins = "soc_gpio19_pg6"; + nvidia,function = "gp"; + nvidia,pin-group = "pwm7"; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + hdr40-pin33 { + nvidia,pins = "soc_gpio21_ph0"; + nvidia,function = "gp"; + nvidia,pin-group = "pwm5"; + nvidia,tristate = ; + nvidia,enable-input = ; + }; hdr40-pin35 { nvidia,pins = "soc_gpio44_pi2"; nvidia,function = "i2s2"; diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-p3509-a02-csi.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-p3509-a02-csi.dts index 495e3733f4..5a4771cfb6 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-p3509-a02-csi.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-p3509-a02-csi.dts @@ -18,10 +18,11 @@ /plugin/; #include +#include / { overlay-name = "Jetson Nano CSI Connector"; - compatible = "nvidia,p3509-0000+p3767-0000"; + compatible = JETSON_COMPATIBLE_P3509; fragment@0 { target = <&pinmux>; diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-p3768-0000-csi.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-p3768-0000-csi.dts index b7d507c9a4..30215a54a8 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-p3768-0000-csi.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0000-p3768-0000-csi.dts @@ -22,10 +22,11 @@ /plugin/; #include +#include / { overlay-name = "Jetson 24pin CSI Connector"; - compatible = "nvidia,p3768-0000+p3767-0000"; + compatible = JETSON_COMPATIBLE_P3768; fragment@0 { target = <&pinmux>; diff --git 
a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0001-p3509-a02.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0001-p3509-a02.dts index 4d3f3833da..74579399aa 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0001-p3509-a02.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0001-p3509-a02.dts @@ -1,7 +1,7 @@ /* * Top level DTS file for CVM:P3767-0001 and CVB:P3509-a02-0000. * - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -15,6 +15,7 @@ /dts-v1/; #include "tegra234-p3767-0000-p3509-a02.dts" +#include / { nvidia,dtsfilename = __FILE__; diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0001-p3768-0000-a0.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0001-p3768-0000-a0.dts index fb51b048fc..1e2687ed83 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0001-p3768-0000-a0.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0001-p3768-0000-a0.dts @@ -1,7 +1,7 @@ /* * Top level DTS file for CVM:P3767-0001 and CVB:P3768-0000. * - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -16,6 +16,7 @@ /dts-v1/; #include "tegra234-p3767-0000-p3768-0000-a0.dts" +#include / { nvidia,dtsfilename = __FILE__; diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0003-p3509-a02.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0003-p3509-a02.dts index 3bd910e654..aef918ef60 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0003-p3509-a02.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0003-p3509-a02.dts @@ -1,7 +1,7 @@ /* * Top level DTS file for CVM:P3767-0003/P3767-0005 and CVB:P3509-a02-0000. * - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -15,6 +15,8 @@ /dts-v1/; #include "tegra234-p3767-0000-p3509-a02.dts" +#include +#include / { nvidia,dtsfilename = __FILE__; diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0003-p3768-0000-a0.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0003-p3768-0000-a0.dts index c41c74791f..ed01d29c2f 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0003-p3768-0000-a0.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0003-p3768-0000-a0.dts @@ -1,7 +1,7 @@ /* * Top level DTS file for CVM:P3767-0003/P3767-0005 and CVB:P3768-0000. * - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -16,6 +16,8 @@ /dts-v1/; #include "tegra234-p3767-0000-p3768-0000-a0.dts" +#include +#include / { nvidia,dtsfilename = __FILE__; diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0004-p3509-a02.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0004-p3509-a02.dts index ee008764d5..df48ddb5f2 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0004-p3509-a02.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0004-p3509-a02.dts @@ -1,7 +1,7 @@ /* * Top level DTS file for CVM:P3767-0004 and CVB:P3509-a02-0000. * - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -15,6 +15,8 @@ /dts-v1/; #include "tegra234-p3767-0000-p3509-a02.dts" +#include +#include / { nvidia,dtsfilename = __FILE__; diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0004-p3768-0000-a0.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0004-p3768-0000-a0.dts index 4169ed0d9e..541e3dd259 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0004-p3768-0000-a0.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-0004-p3768-0000-a0.dts @@ -1,7 +1,7 @@ /* * Top level DTS file for CVM:P3767-0004 and CVB:P3768-0000. * - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -16,6 +16,8 @@ /dts-v1/; #include "tegra234-p3767-0000-p3768-0000-a0.dts" +#include +#include / { nvidia,dtsfilename = __FILE__; diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-imx219-dual.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-imx219-dual.dts index 3809171c1b..2bdcb9b71d 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-imx219-dual.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-imx219-dual.dts @@ -3,17 +3,19 @@ * Jetson Device-tree overlay for dual camera IMX219 rbpcv2 on dual * cam connector baseboards * - * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023 NVIDIA CORPORATION. All rights reserved. * */ /dts-v1/; /plugin/; +#include + / { overlay-name = "Camera IMX219 Dual"; jetson-header-name = "Jetson Nano CSI Connector"; - compatible = "nvidia,p3509-0000+p3767-0000"; + compatible = JETSON_COMPATIBLE_P3509; /* IMX219 dual sensor module */ fragment@0 { diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-imx477-dual.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-imx477-dual.dts index 622f859d56..bafbba9ed3 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-imx477-dual.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-imx477-dual.dts @@ -3,17 +3,19 @@ * Jetson Device-tree overlay for dual camera IMX477 rbpcv3 on * dual cam connector baseboards * - * Copyright (c) 2021-2022 NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023 NVIDIA CORPORATION. All rights reserved. 
* */ /dts-v1/; /plugin/; +#include + / { overlay-name = "Camera IMX477 Dual"; jetson-header-name = "Jetson Nano CSI Connector"; - compatible = "nvidia,p3509-0000+p3767-0000"; + compatible = JETSON_COMPATIBLE_P3509; /* IMX477 dual sensor module */ fragment@0 { diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-imx477-imx219.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-imx477-imx219.dts index 0a8f080b66..9c2681d3d1 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-imx477-imx219.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-imx477-imx219.dts @@ -3,17 +3,19 @@ * Jetson Device-tree overlay to enable Camera IMX477 rbpcv3 on portA along with * camera IMX219 on portB, for dual cam connector baseboards * - * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023 NVIDIA CORPORATION. All rights reserved. * */ /dts-v1/; /plugin/; +#include + / { overlay-name = "Camera IMX477-A and IMX219-C"; jetson-header-name = "Jetson Nano CSI Connector"; - compatible = "nvidia,p3509-0000+p3767-0000"; + compatible = JETSON_COMPATIBLE_P3509; /* IMX77 sensor module on CSI PORT A and IMX219 sensor module on CSI PORT B */ fragment@0 { diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx219-dual.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx219-dual.dts index 0c95655fee..5b29560908 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx219-dual.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx219-dual.dts @@ -3,17 +3,19 @@ * Jetson Device-tree overlay for dual camera IMX219 rbpcv2 on dual * cam connector baseboards * - * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023 NVIDIA CORPORATION. All rights reserved. 
* */ /dts-v1/; /plugin/; +#include + / { overlay-name = "Camera IMX219 Dual"; jetson-header-name = "Jetson 24pin CSI Connector"; - compatible = "nvidia,p3768-0000+p3767-0000"; + compatible = JETSON_COMPATIBLE_P3768; /* IMX219 dual sensor module */ fragment@0 { diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx477-dual-4lane.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx477-dual-4lane.dts index ff08b0d116..27ab9cf011 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx477-dual-4lane.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx477-dual-4lane.dts @@ -10,10 +10,12 @@ /dts-v1/; /plugin/; +#include + / { overlay-name = "Camera IMX477 Dual 4 Lane"; jetson-header-name = "Jetson 24pin CSI Connector"; - compatible = "nvidia,p3768-0000+p3767-0000"; + compatible = JETSON_COMPATIBLE_P3768; /* IMX477 4 lane sensor module */ fragment@0 { diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx477-dual.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx477-dual.dts index 3027a6e298..fc88222cb4 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx477-dual.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx477-dual.dts @@ -3,17 +3,19 @@ * Jetson Device-tree overlay for dual camera IMX477 rbpcv3 on * dual cam connector baseboards * - * Copyright (c) 2021-2022 NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023 NVIDIA CORPORATION. All rights reserved. 
* */ /dts-v1/; /plugin/; +#include + / { overlay-name = "Camera IMX477 Dual"; jetson-header-name = "Jetson 24pin CSI Connector"; - compatible = "nvidia,p3768-0000+p3767-0000"; + compatible = JETSON_COMPATIBLE_P3768; /* IMX477 dual sensor module */ fragment@0 { diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx477-imx219.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx477-imx219.dts index abd9a2bb07..ae71d949ba 100644 --- a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx477-imx219.dts +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-camera-p3768-imx477-imx219.dts @@ -3,17 +3,19 @@ * Jetson Device-tree overlay to enable Camera IMX477 rbpcv3 on portA along with * camera IMX219 on portB, for dual cam connector baseboards * - * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023 NVIDIA CORPORATION. All rights reserved. * */ /dts-v1/; /plugin/; +#include + / { overlay-name = "Camera IMX477-A and IMX219-C"; jetson-header-name = "Jetson 24pin CSI Connector"; - compatible = "nvidia,p3768-0000+p3767-0000"; + compatible = JETSON_COMPATIBLE_P3768; /* IMX77 sensor module on CSI PORT A and IMX219 sensor module on CSI PORT B */ fragment@0 { diff --git a/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-overlay.dts b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-overlay.dts new file mode 100644 index 0000000000..98c6c38207 --- /dev/null +++ b/hardware/nvidia/platform/t23x/p3768/kernel-dts/tegra234-p3767-overlay.dts @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/dts-v1/; +/plugin/; + +/ { + overlay-name = "P3767 Overlay Support"; + compatible = "nvidia,tegra23x"; + nvidia,dtsfilename = __FILE__; + nvidia,dtbbuildtime = __DATE__, __TIME__; + + fragment@0 { + target-path = "/host1x@13e00000"; + board_config { + fuse-info = "fuse-disable-nvenc"; + }; + __overlay__ { + nvenc@154c0000 { + status = "disabled"; + }; + }; + }; + + fragment@1 { + target-path = "/host1x@13e00000"; + board_config { + fuse-info = "fuse-disable-pva"; + }; + __overlay__ { + pva0 { + status = "disabled"; + }; + }; + }; + + fragment@2 { + target-path = "/host1x@13e00000"; + board_config { + fuse-info = "fuse-disable-dla0"; + }; + __overlay__ { + nvdla0@15880000 { + status = "disabled"; + }; + }; + }; + + fragment@3 { + target-path = "/host1x@13e00000"; + board_config { + fuse-info = "fuse-disable-dla1"; + }; + __overlay__ { + nvdla1@158c0000 { + status = "disabled"; + }; + }; + }; +}; diff --git a/hardware/nvidia/platform/t23x/prometheus/kernel-dts/cvb/tegra234-p3740-0002-b00.dtsi b/hardware/nvidia/platform/t23x/prometheus/kernel-dts/cvb/tegra234-p3740-0002-b00.dtsi index a60ae63d60..2e79e6fef4 100644 --- a/hardware/nvidia/platform/t23x/prometheus/kernel-dts/cvb/tegra234-p3740-0002-b00.dtsi +++ b/hardware/nvidia/platform/t23x/prometheus/kernel-dts/cvb/tegra234-p3740-0002-b00.dtsi @@ -55,6 +55,21 @@ }; }; + /* SPI1 connects to TPM */ + spi@3210000 { + status = "okay"; + spi@0 { /* chip select 0 */ + compatible = "tegra-spidev"; + reg = <0x0>; + spi-max-frequency = <50000000>; + controller-data { + nvidia,enable-hw-based-cs; + nvidia,rx-clk-tap-delay = <0x10>; + nvidia,tx-clk-tap-delay = 
<0x0>; + }; + }; + }; + serial@3110000 { status = "okay"; }; @@ -229,4 +244,12 @@ "usb3-0", "usb3-1", "usb3-2"; nvidia,xusb-padctl = <&xusb_padctl>; }; + + mttcan@c310000 { + status = "okay"; + }; + + mttcan@c320000 { + status = "okay"; + }; }; diff --git a/hardware/nvidia/platform/t23x/prometheus/kernel-dts/cvb/tegra234-p3740-audio.dtsi b/hardware/nvidia/platform/t23x/prometheus/kernel-dts/cvb/tegra234-p3740-audio.dtsi index 02d8372d57..29a8378166 100644 --- a/hardware/nvidia/platform/t23x/prometheus/kernel-dts/cvb/tegra234-p3740-audio.dtsi +++ b/hardware/nvidia/platform/t23x/prometheus/kernel-dts/cvb/tegra234-p3740-audio.dtsi @@ -118,3 +118,12 @@ prefix = "CVB-RT"; }; }; + +/* I2S link on CSI Connector */ +csi_conn_i2s: &i2s6_to_codec { + /* + * HDMI IN board P3785 for HDMI->CSI I2S signals. + */ + bitclock-master; + frame-master; +}; diff --git a/hardware/nvidia/platform/t23x/prometheus/kernel-dts/cvb/tegra234-pwm-fan.dtsi b/hardware/nvidia/platform/t23x/prometheus/kernel-dts/cvb/tegra234-pwm-fan.dtsi index 1a7cf0bc89..8d3014a67a 100644 --- a/hardware/nvidia/platform/t23x/prometheus/kernel-dts/cvb/tegra234-pwm-fan.dtsi +++ b/hardware/nvidia/platform/t23x/prometheus/kernel-dts/cvb/tegra234-pwm-fan.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -13,53 +13,41 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
*/ -#include -#include / { - pwm_fan_shared_data: pfsd { - num_resources = <0>; - secret = <47>; - active_steps = <10>; - active_rpm = <0 1000 2000 3000 4000 5000 6000 7000 10000 11000>; - rpm_diff_tolerance = <2>; - active_rru = <40 2 1 1 1 1 1 1 1 1>; - active_rrd = <40 2 1 1 1 1 1 1 1 1>; - state_cap_lookup = <2 2 2 2 3 3 3 4 4 4>; - pwm_period = <45334>; - pwm_id = <3>; - pwm_polarity = ; - suspend_state = <1>; - step_time = <100>; /* mesecs */ - state_cap = <7>; - active_pwm_max = <256>; - tach_period = <1000>; - pwm_gpio = <&tegra_main_gpio TEGRA234_MAIN_GPIO(Q, 2) GPIO_ACTIVE_LOW>; - }; + i2c@c250000 { + f75308@4d { + compatible = "fintek,f75308"; + reg = <0x4d>; + #address-cells = <1>; + #size-cells = <0>; + + fan@0 { + reg = <0x0>; + type = "pwm"; + duty = "manual_duty"; + 5seg = <100 80 60 40 20>; + }; - pwm-fan { - compatible = "pwm-fan"; - status = "okay"; - #pwm-cells = <1>; - pwms = <&tegra_pwm3 0 45334>; - shared_data = <&pwm_fan_shared_data>; - profiles { - default = "quiet"; - quiet { - state_cap = <4>; -#if TEGRA_PWM_FAN_DT_VERSION == DT_VERSION_2 - cooling-levels = <255 178 135 95 0 0 0 0 0 0>; -#else - active_pwm = <0 77 120 160 255 255 255 255 255 255>; -#endif + fan@1 { + reg = <0x1>; + type = "pwm"; + duty = "manual_duty"; + 5seg = <100 80 60 40 20>; }; - cool { - state_cap = <4>; -#if TEGRA_PWM_FAN_DT_VERSION == DT_VERSION_2 - cooling-levels = <255 178 135 95 0 0 0 0 0 0>; -#else - active_pwm = <0 77 120 160 255 255 255 255 255 255>; -#endif + + fan@2 { + reg = <0x2>; + type = "pwm"; + duty = "manual_duty"; + 5seg = <100 80 60 40 20>; + }; + + fan@3 { + reg = <0x3>; + type = "pwm"; + duty = "manual_duty"; + 5seg = <100 80 60 40 20>; }; }; }; diff --git a/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3701-0002-p3740-0002-safety.dts b/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3701-0002-p3740-0002-safety.dts index f1f68a0d61..a94a79c802 100644 --- 
a/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3701-0002-p3740-0002-safety.dts +++ b/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3701-0002-p3740-0002-safety.dts @@ -23,11 +23,129 @@ compatible = "nvidia,p3740-0002+p3701-0002-safety", "nvidia,tegra23x", "nvidia,tegra234"; - safetyservices_epl_client { + /* This is hsp_top2 where fsi-ccplex comm happens in all below nodes */ + tegra-hsp@1600000 { status = "okay"; }; - tegra-hsp@1600000 { + fsicom_client { /* Kernel driver on which FsiComIvc relies on */ + status = "okay"; + }; + + FsiComIvc { /* CCPLEX-FSI Comm Daemon uses below DT node */ + compatible = "nvidia,tegra-fsicom-channels"; status = "okay"; + nChannel=<7>; + channel_0{ + frame-count = <4>; + frame-size = <1024>; + NvSciCh = "nvfsicom_EPD"; + }; + channel_1{ + frame-count = <30>; + frame-size = <64>; + NvSciCh = "nvfsicom_CcplexApp"; + }; + channel_2{ + frame-count = <4>; + frame-size = <64>; + NvSciCh = "nvfsicom_CcplexApp_state_change"; + }; + channel_3{ + frame-count = <4>; + frame-size = <64>; + NvSciCh = "nvfsicom_app1"; + }; + channel_4{ + frame-count = <2>; + frame-size = <512>; + NvSciCh = "nvfsicom_app2"; + }; + channel_5{ + frame-count = <4>; + frame-size = <64>; + NvSciCh = "nvfsicom_appGR"; + }; + channel_6{ + frame-count = <4>; + frame-size = <10240>; + }; + }; + + /* + * The app is responsible to send runtime tunables from the node SS_ErrorReportingConfig + * for the FSI FW + */ + FsiComClientChConfigEpd { + compatible = "nvidia,tegra-fsicom-EPD"; + status = "disabled"; + channelid_list = <0>; + }; + + SS_ErrorReportingConfig { + compatible = "nvidia,tegra-SafetyServiceConfig"; + status = "disabled"; + /*Number of Sw Errors to be disabled*/ + Sw_Errors_count = <0>; + /* List of SW errors to disable*/ + /*Entry should be< >*/ + /* Sample :<0x8000 0xFFFFFFFF>;*/ + /*If the Error code mask is 0xFFFFFFFF then all the error code from the reporter ID will be disabled*/ + Sw_Errors = <>; + }; + + 
FsiComAppChConfCcplexApp { + compatible = "nvidia,tegra-fsicom-CcplexApp"; + status = "okay"; + channelid_list = <1 2>; + }; + + safetyservices_epl_client { /* userspace app uses this driver to send error code */ + compatible = "nvidia,tegra234-epl-client"; + /* + * FSI FW version 1.1.1 uses HSP 1, while latest FSI FW version + * 1.5.4 onwards will be using HSP 0. + */ +#if TEGRA_HSP_DT_VERSION >= DT_VERSION_2 + mboxes = + <&hsp_top2 (TEGRA_HSP_MBOX_TYPE_SM | TEGRA_HSP_MBOX_TYPE_SM_128BIT) TEGRA_HSP_SM_TX(0)>; +#else + mboxes = + <&hsp_top2 TEGRA_HSP_MBOX_TYPE_SM_128BIT TEGRA_HSP_SM_TX(0)>; +#endif + mbox-names = "epl-tx"; + + reg = <0x0 0x00110000 0x0 0x4>, + <0x0 0x00110004 0x0 0x4>, + <0x0 0x00120000 0x0 0x4>, + <0x0 0x00120004 0x0 0x4>, + <0x0 0x00130000 0x0 0x4>, + <0x0 0x00130004 0x0 0x4>, + <0x0 0x00140000 0x0 0x4>, + <0x0 0x00140004 0x0 0x4>, + <0x0 0x00150000 0x0 0x4>, + <0x0 0x00150004 0x0 0x4>, + <0x0 0x024e0038 0x0 0x4>; + + /* Device driver's name for reporting errors via MISCREG_MISC_EC_ERR0_SW_ERR_CODE_0 */ + client-misc-sw-generic-err0 = "fsicom_client"; + /* Device driver's name for reporting errors via MISCREG_MISC_EC_ERR1_SW_ERR_CODE_0 */ + client-misc-sw-generic-err1 = "gk20b"; + /* Device driver's name for reporting errors via MISCREG_MISC_EC_ERR3_SW_ERR_CODE_0 */ + client-misc-sw-generic-err3 = "gk20d"; + /* Device driver's name for reporting errors via MISCREG_MISC_EC_ERR4_SW_ERR_CODE_0 */ + client-misc-sw-generic-err4 = "gk20e"; + + status = "okay"; + }; + + /* + * FIXME: there is no auto dtsi which enables this driver, and its mailbox + * shown in its yaml file overlaps with the fsicom_client driver. Keeping + * this disabled till it gets sorted out. 
+ */ + hsierrrptinj { + compatible = "nvidia,tegra234-hsierrrptinj"; + status = "disabled"; }; }; diff --git a/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3701-0002-p3740-0002.dts b/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3701-0002-p3740-0002.dts index 5d3ce2cdfe..957c345ee8 100644 --- a/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3701-0002-p3740-0002.dts +++ b/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3701-0002-p3740-0002.dts @@ -1,7 +1,7 @@ /* * Top level DTS file for CVM:P3701-0002 and CVB:P3704-0002. * - * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -16,7 +16,9 @@ /dts-v1/; #include "cvm/tegra234-p3701-0002.dtsi" #include "cvb/tegra234-p3740-0002-b00.dtsi" +#include "cvb/tegra234-pwm-fan.dtsi" #include +#include #include #include "tegra234-dcb-p3701-0000-a02-p3740-0000-a00.dtsi" @@ -40,4 +42,8 @@ display@13800000 { status = "okay"; }; + + tegra_soc_hwpm { + status = "okay"; + }; }; diff --git a/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3740-camera-hawk-owl-overlay.dts b/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3740-camera-hawk-owl-overlay.dts index ed31831020..d463a6b962 100644 --- a/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3740-camera-hawk-owl-overlay.dts +++ b/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3740-camera-hawk-owl-overlay.dts @@ -1,19 +1,30 @@ -// SPDX-License-Identifier: GPL-2.0-only /* - * Jetson Device-tree overlay for Camera Hawk & Owl on t23x platforms + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. * - * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. 
+ * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . */ +/* + * Device-tree overlay for Hawk & Owl. + */ /dts-v1/; /plugin/; / { overlay-name = "Jetson Camera Hawk-Owl p3783 module"; - jetson-header-name = "Jetson AGX CSI Connector"; - compatible = "nvidia,p3740-0000+p3701-0000"; + jetson-header-name = "Jetson 122pin CSI Connector"; + compatible = "nvidia,p3740-0000+p3701-0000", "nvidia,p3740-0002-b01+p3701-0002"; /* VI number of channels */ fragment@0 { diff --git a/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3740-camera-p3785-overlay.dts b/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3740-camera-p3785-overlay.dts index b733bb50c7..61532ad7c7 100644 --- a/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3740-camera-p3785-overlay.dts +++ b/hardware/nvidia/platform/t23x/prometheus/kernel-dts/tegra234-p3740-camera-p3785-overlay.dts @@ -1,9 +1,21 @@ -// SPDX-License-Identifier: GPL-2.0-only /* - * Jetson Device-tree overlay for Camera Hawk & Owl on t23x platforms + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. * - * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
* + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* + * Device-tree overlay for P3785. */ @@ -12,8 +24,8 @@ / { overlay-name = "Jetson Camera P3785 "; - jetson-header-name = "Jetson AGX CSI Connector"; - compatible = "nvidia,p3740-0000+p3701-0000"; + jetson-header-name = "Jetson 122pin CSI Connector"; + compatible = "nvidia,p3740-0000+p3701-0000", "nvidia,p3740-0002-b01+p3701-0002"; /* VI number of channels */ fragment@0 { diff --git a/hardware/nvidia/platform/tegra/common/kernel-dts/audio/tegra186-audio-dai-links.dtsi b/hardware/nvidia/platform/tegra/common/kernel-dts/audio/tegra186-audio-dai-links.dtsi index 4bed4f563f..84fb900c92 100644 --- a/hardware/nvidia/platform/tegra/common/kernel-dts/audio/tegra186-audio-dai-links.dtsi +++ b/hardware/nvidia/platform/tegra/common/kernel-dts/audio/tegra186-audio-dai-links.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -1611,7 +1611,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF1>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN1>; }; codec { sound-dai = <&tegra_amixer MIXER_IN1>; @@ -1623,7 +1623,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF2>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN2>; }; codec { sound-dai = <&tegra_amixer MIXER_IN2>; @@ -1635,7 +1635,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF3>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN3>; }; codec { sound-dai = <&tegra_amixer MIXER_IN3>; @@ -1647,7 +1647,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF4>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN4>; }; codec { sound-dai = <&tegra_amixer MIXER_IN4>; @@ -1659,7 +1659,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF5>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN5>; }; codec { sound-dai = <&tegra_amixer MIXER_IN5>; @@ -1671,7 +1671,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF6>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN6>; }; codec { sound-dai = <&tegra_amixer MIXER_IN6>; @@ -1683,7 +1683,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF7>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN7>; }; codec { sound-dai = <&tegra_amixer MIXER_IN7>; @@ -1695,7 +1695,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF8>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN8>; }; codec { sound-dai = <&tegra_amixer MIXER_IN8>; @@ -1707,7 +1707,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF9>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN9>; }; codec { sound-dai = <&tegra_amixer MIXER_IN9>; @@ -1719,7 +1719,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF10>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN10>; }; codec { sound-dai = <&tegra_amixer MIXER_IN10>; @@ -1734,7 +1734,7 @@ sound-dai 
= <&tegra_amixer MIXER_OUT1>; }; codec { - sound-dai = <&tegra_axbar XBAR_MIXER_IF1>; + sound-dai = <&tegra_axbar XBAR_MIXER_OUT1>; }; }; @@ -1746,7 +1746,7 @@ sound-dai = <&tegra_amixer MIXER_OUT2>; }; codec { - sound-dai = <&tegra_axbar XBAR_MIXER_IF2>; + sound-dai = <&tegra_axbar XBAR_MIXER_OUT2>; }; }; @@ -1758,7 +1758,7 @@ sound-dai = <&tegra_amixer MIXER_OUT3>; }; codec { - sound-dai = <&tegra_axbar XBAR_MIXER_IF3>; + sound-dai = <&tegra_axbar XBAR_MIXER_OUT3>; }; }; @@ -1770,7 +1770,7 @@ sound-dai = <&tegra_amixer MIXER_OUT4>; }; codec { - sound-dai = <&tegra_axbar XBAR_MIXER_IF4>; + sound-dai = <&tegra_axbar XBAR_MIXER_OUT4>; }; }; @@ -1782,7 +1782,7 @@ sound-dai = <&tegra_amixer MIXER_OUT5>; }; codec { - sound-dai = <&tegra_axbar XBAR_MIXER_IF5>; + sound-dai = <&tegra_axbar XBAR_MIXER_OUT5>; }; }; @@ -1791,7 +1791,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_SFC1>; + sound-dai = <&tegra_axbar XBAR_SFC1_RX>; }; codec { sound-dai = <&tegra_sfc1 SFC_IN>; @@ -1804,7 +1804,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_SFC2>; + sound-dai = <&tegra_axbar XBAR_SFC2_RX>; }; codec { sound-dai = <&tegra_sfc2 SFC_IN>; @@ -1817,7 +1817,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_SFC3>; + sound-dai = <&tegra_axbar XBAR_SFC3_RX>; }; codec { sound-dai = <&tegra_sfc3 SFC_IN>; @@ -1830,7 +1830,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_SFC4>; + sound-dai = <&tegra_axbar XBAR_SFC4_RX>; }; codec { sound-dai = <&tegra_sfc4 SFC_IN>; @@ -1846,7 +1846,7 @@ sound-dai = <&tegra_sfc1 SFC_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_SFC1>; + sound-dai = <&tegra_axbar XBAR_SFC1_TX>; }; }; @@ -1858,7 +1858,7 @@ sound-dai = <&tegra_sfc2 SFC_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_SFC2>; + sound-dai = <&tegra_axbar XBAR_SFC2_TX>; }; }; @@ -1870,7 +1870,7 @@ sound-dai = <&tegra_sfc3 SFC_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_SFC3>; + sound-dai = <&tegra_axbar XBAR_SFC3_TX>; }; }; @@ -1882,7 +1882,7 @@ 
sound-dai = <&tegra_sfc4 SFC_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_SFC4>; + sound-dai = <&tegra_axbar XBAR_SFC4_TX>; }; }; @@ -2041,7 +2041,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MVC1>; + sound-dai = <&tegra_axbar XBAR_MVC1_RX>; }; codec { sound-dai = <&tegra_mvc1 MVC_IN>; @@ -2054,7 +2054,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MVC2>; + sound-dai = <&tegra_axbar XBAR_MVC2_RX>; }; codec { sound-dai = <&tegra_mvc2 MVC_IN>; @@ -2070,7 +2070,7 @@ sound-dai = <&tegra_mvc1 MVC_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_MVC1>; + sound-dai = <&tegra_axbar XBAR_MVC1_TX>; }; }; @@ -2082,7 +2082,7 @@ sound-dai = <&tegra_mvc2 MVC_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_MVC2>; + sound-dai = <&tegra_axbar XBAR_MVC2_TX>; }; }; @@ -2091,7 +2091,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_OPE1>; + sound-dai = <&tegra_axbar XBAR_OPE1_RX>; }; codec { sound-dai = <&tegra_ope1 OPE_IN>; @@ -2107,7 +2107,7 @@ sound-dai = <&tegra_ope1 OPE_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_OPE1>; + sound-dai = <&tegra_axbar XBAR_OPE1_TX>; }; }; @@ -2116,7 +2116,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_ASRC_IF1>; + sound-dai = <&tegra_axbar XBAR_ASRC_IN1>; }; codec { sound-dai = <&tegra_asrc ASRC_IN1>; @@ -2129,7 +2129,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_ASRC_IF2>; + sound-dai = <&tegra_axbar XBAR_ASRC_IN2>; }; codec { sound-dai = <&tegra_asrc ASRC_IN2>; @@ -2142,7 +2142,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_ASRC_IF3>; + sound-dai = <&tegra_axbar XBAR_ASRC_IN3>; }; codec { sound-dai = <&tegra_asrc ASRC_IN3>; @@ -2155,7 +2155,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_ASRC_IF4>; + sound-dai = <&tegra_axbar XBAR_ASRC_IN4>; }; codec { sound-dai = <&tegra_asrc ASRC_IN4>; @@ -2168,7 +2168,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_ASRC_IF5>; + sound-dai = <&tegra_axbar XBAR_ASRC_IN5>; }; codec { sound-dai = <&tegra_asrc 
ASRC_IN5>; @@ -2181,7 +2181,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_ASRC_IF6>; + sound-dai = <&tegra_axbar XBAR_ASRC_IN6>; }; codec { sound-dai = <&tegra_asrc ASRC_IN6>; @@ -2194,7 +2194,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_ASRC_IF7>; + sound-dai = <&tegra_axbar XBAR_ASRC_IN7>; }; codec { sound-dai = <&tegra_asrc ASRC_IN7>; @@ -2210,7 +2210,7 @@ sound-dai = <&tegra_asrc ASRC_OUT1>; }; codec { - sound-dai = <&tegra_axbar XBAR_ASRC_IF1>; + sound-dai = <&tegra_axbar XBAR_ASRC_OUT1>; }; }; @@ -2222,7 +2222,7 @@ sound-dai = <&tegra_asrc ASRC_OUT2>; }; codec { - sound-dai = <&tegra_axbar XBAR_ASRC_IF2>; + sound-dai = <&tegra_axbar XBAR_ASRC_OUT2>; }; }; @@ -2234,7 +2234,7 @@ sound-dai = <&tegra_asrc ASRC_OUT3>; }; codec { - sound-dai = <&tegra_axbar XBAR_ASRC_IF3>; + sound-dai = <&tegra_axbar XBAR_ASRC_OUT3>; }; }; @@ -2246,7 +2246,7 @@ sound-dai = <&tegra_asrc ASRC_OUT4>; }; codec { - sound-dai = <&tegra_axbar XBAR_ASRC_IF4>; + sound-dai = <&tegra_axbar XBAR_ASRC_OUT4>; }; }; @@ -2258,7 +2258,7 @@ sound-dai = <&tegra_asrc ASRC_OUT5>; }; codec { - sound-dai = <&tegra_axbar XBAR_ASRC_IF5>; + sound-dai = <&tegra_axbar XBAR_ASRC_OUT5>; }; }; @@ -2270,7 +2270,7 @@ sound-dai = <&tegra_asrc ASRC_OUT6>; }; codec { - sound-dai = <&tegra_axbar XBAR_ASRC_IF6>; + sound-dai = <&tegra_axbar XBAR_ASRC_OUT6>; }; }; diff --git a/hardware/nvidia/platform/tegra/common/kernel-dts/audio/tegra186-audio-graph.dtsi b/hardware/nvidia/platform/tegra/common/kernel-dts/audio/tegra186-audio-graph.dtsi index 1e84515ab7..7f8617c5d4 100644 --- a/hardware/nvidia/platform/tegra/common/kernel-dts/audio/tegra186-audio-graph.dtsi +++ b/hardware/nvidia/platform/tegra/common/kernel-dts/audio/tegra186-audio-graph.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -282,118 +282,158 @@ }; }; - xbar_sfc1_in_port: port@XBAR_SFC1 { - reg = ; + xbar_sfc1_in_port: port@XBAR_SFC1_RX { + reg = ; xbar_sfc1_in_ep: endpoint { remote-endpoint = <&sfc1_in_cif_ep>; }; }; - xbar_sfc2_in_port: port@XBAR_SFC2 { - reg = ; + xbar_sfc2_in_port: port@XBAR_SFC2_RX { + reg = ; xbar_sfc2_in_ep: endpoint { remote-endpoint = <&sfc2_in_cif_ep>; }; }; - xbar_sfc3_in_port: port@XBAR_SFC3 { - reg = ; + xbar_sfc3_in_port: port@XBAR_SFC3_RX { + reg = ; xbar_sfc3_in_ep: endpoint { remote-endpoint = <&sfc3_in_cif_ep>; }; }; - xbar_sfc4_in_port: port@XBAR_SFC4 { - reg = ; + xbar_sfc4_in_port: port@XBAR_SFC4_RX { + reg = ; xbar_sfc4_in_ep: endpoint { remote-endpoint = <&sfc4_in_cif_ep>; }; }; - xbar_mixer_in1_port: port@XBAR_MIXER_IF1 { - reg = ; + xbar_mixer_in1_port: port@XBAR_MIXER_IN1 { + reg = ; xbar_mixer_in1_ep: endpoint { remote-endpoint = <&mixer_in1_cif_ep>; }; }; - xbar_mixer_in2_port: port@XBAR_MIXER_IF2 { - reg = ; + xbar_mixer_in2_port: port@XBAR_MIXER_IN2 { + reg = ; xbar_mixer_in2_ep: endpoint { remote-endpoint = <&mixer_in2_cif_ep>; }; }; - xbar_mixer_in3_port: port@XBAR_MIXER_IF3 { - reg = ; + xbar_mixer_in3_port: port@XBAR_MIXER_IN3 { + reg = ; xbar_mixer_in3_ep: endpoint { remote-endpoint = <&mixer_in3_cif_ep>; }; }; - xbar_mixer_in4_port: port@XBAR_MIXER_IF4 { - reg = ; + xbar_mixer_in4_port: port@XBAR_MIXER_IN4 { + reg = ; xbar_mixer_in4_ep: endpoint { remote-endpoint = <&mixer_in4_cif_ep>; }; }; - xbar_mixer_in5_port: port@XBAR_MIXER_IF5 { - reg = ; + xbar_mixer_in5_port: port@XBAR_MIXER_IN5 { + reg = ; xbar_mixer_in5_ep: endpoint { remote-endpoint = <&mixer_in5_cif_ep>; }; }; - xbar_mixer_in6_port: port@XBAR_MIXER_IF6 { - reg = ; + xbar_mixer_in6_port: port@XBAR_MIXER_IN6 { + reg = ; xbar_mixer_in6_ep: endpoint { remote-endpoint = <&mixer_in6_cif_ep>; }; }; - xbar_mixer_in7_port: 
port@XBAR_MIXER_IF7 { - reg = ; + xbar_mixer_in7_port: port@XBAR_MIXER_IN7 { + reg = ; xbar_mixer_in7_ep: endpoint { remote-endpoint = <&mixer_in7_cif_ep>; }; }; - xbar_mixer_in8_port: port@XBAR_MIXER_IF8 { - reg = ; + xbar_mixer_in8_port: port@XBAR_MIXER_IN8 { + reg = ; xbar_mixer_in8_ep: endpoint { remote-endpoint = <&mixer_in8_cif_ep>; }; }; - xbar_mixer_in9_port: port@XBAR_MIXER_IF9 { - reg = ; + xbar_mixer_in9_port: port@XBAR_MIXER_IN9 { + reg = ; xbar_mixer_in9_ep: endpoint { remote-endpoint = <&mixer_in9_cif_ep>; }; }; - xbar_mixer_in10_port: port@XBAR_MIXER_IF10 { - reg = ; + xbar_mixer_in10_port: port@XBAR_MIXER_IN10 { + reg = ; xbar_mixer_in10_ep: endpoint { remote-endpoint = <&mixer_in10_cif_ep>; }; }; + xbar_mixer_out1_port: port@XBAR_MIXER_OUT1 { + reg = ; + + xbar_mixer_out1_ep: endpoint { + remote-endpoint = <&mixer_out1_cif_ep>; + }; + }; + + xbar_mixer_out2_port: port@XBAR_MIXER_OUT2 { + reg = ; + + xbar_mixer_out2_ep: endpoint { + remote-endpoint = <&mixer_out2_cif_ep>; + }; + }; + + xbar_mixer_out3_port: port@XBAR_MIXER_OUT3 { + reg = ; + + xbar_mixer_out3_ep: endpoint { + remote-endpoint = <&mixer_out3_cif_ep>; + }; + }; + + xbar_mixer_out4_port: port@XBAR_MIXER_OUT4 { + reg = ; + + xbar_mixer_out4_ep: endpoint { + remote-endpoint = <&mixer_out4_cif_ep>; + }; + }; + + xbar_mixer_out5_port: port@XBAR_MIXER_OUT5 { + reg = ; + + xbar_mixer_out5_ep: endpoint { + remote-endpoint = <&mixer_out5_cif_ep>; + }; + }; + xbar_afc1_in_port: port@XBAR_AFC1 { reg = ; @@ -442,24 +482,24 @@ }; }; - xbar_ope1_in_port: port@XBAR_OPE1 { - reg = ; + xbar_ope1_in_port: port@XBAR_OPE1_RX { + reg = ; xbar_ope1_in_ep: endpoint { remote-endpoint = <&ope1_in_cif_ep>; }; }; - xbar_mvc1_in_port: port@XBAR_MVC1 { - reg = ; + xbar_mvc1_in_port: port@XBAR_MVC1_RX { + reg = ; xbar_mvc1_in_ep: endpoint { remote-endpoint = <&mvc1_in_cif_ep>; }; }; - xbar_mvc2_in_port: port@XBAR_MVC2 { - reg = ; + xbar_mvc2_in_port: port@XBAR_MVC2_RX { + reg = ; xbar_mvc2_in_ep: endpoint { 
remote-endpoint = <&mvc2_in_cif_ep>; @@ -786,62 +826,110 @@ }; }; - xbar_asrc_in1_port: port@XBAR_ASRC_IF1 { - reg = ; + xbar_asrc_in1_port: port@XBAR_ASRC_IN1 { + reg = ; xbar_asrc_in1_ep: endpoint { remote-endpoint = <&asrc_in1_cif_ep>; }; }; - xbar_asrc_in2_port: port@XBAR_ASRC_IF2 { - reg = ; + xbar_asrc_in2_port: port@XBAR_ASRC_IN2 { + reg = ; xbar_asrc_in2_ep: endpoint { remote-endpoint = <&asrc_in2_cif_ep>; }; }; - xbar_asrc_in3_port: port@XBAR_ASRC_IF3 { - reg = ; + xbar_asrc_in3_port: port@XBAR_ASRC_IN3 { + reg = ; xbar_asrc_in3_ep: endpoint { remote-endpoint = <&asrc_in3_cif_ep>; }; }; - xbar_asrc_in4_port: port@XBAR_ASRC_IF4 { - reg = ; + xbar_asrc_in4_port: port@XBAR_ASRC_IN4 { + reg = ; xbar_asrc_in4_ep: endpoint { remote-endpoint = <&asrc_in4_cif_ep>; }; }; - xbar_asrc_in5_port: port@XBAR_ASRC_IF5 { - reg = ; + xbar_asrc_in5_port: port@XBAR_ASRC_IN5 { + reg = ; xbar_asrc_in5_ep: endpoint { remote-endpoint = <&asrc_in5_cif_ep>; }; }; - xbar_asrc_in6_port: port@XBAR_ASRC_IF6 { - reg = ; + xbar_asrc_in6_port: port@XBAR_ASRC_IN6 { + reg = ; xbar_asrc_in6_ep: endpoint { remote-endpoint = <&asrc_in6_cif_ep>; }; }; - xbar_asrc_in7_port: port@XBAR_ASRC_IF7 { - reg = ; + xbar_asrc_in7_port: port@XBAR_ASRC_IN7 { + reg = ; xbar_asrc_in7_ep: endpoint { remote-endpoint = <&asrc_in7_cif_ep>; }; }; + xbar_asrc_out1_port: port@XBAR_ASRC_OUT1 { + reg = ; + + xbar_asrc_out1_ep: endpoint { + remote-endpoint = <&asrc_out1_cif_ep>; + }; + }; + + xbar_asrc_out2_port: port@XBAR_ASRC_OUT2 { + reg = ; + + xbar_asrc_out2_ep: endpoint { + remote-endpoint = <&asrc_out2_cif_ep>; + }; + }; + + xbar_asrc_out3_port: port@XBAR_ASRC_OUT3 { + reg = ; + + xbar_asrc_out3_ep: endpoint { + remote-endpoint = <&asrc_out3_cif_ep>; + }; + }; + + xbar_asrc_out4_port: port@XBAR_ASRC_OUT4 { + reg = ; + + xbar_asrc_out4_ep: endpoint { + remote-endpoint = <&asrc_out4_cif_ep>; + }; + }; + + xbar_asrc_out5_port: port@XBAR_ASRC_OUT5 { + reg = ; + + xbar_asrc_out5_ep: endpoint { + remote-endpoint = 
<&asrc_out5_cif_ep>; + }; + }; + + xbar_asrc_out6_port: port@XBAR_ASRC_OUT6 { + reg = ; + + xbar_asrc_out6_ep: endpoint { + remote-endpoint = <&asrc_out6_cif_ep>; + }; + }; + xbar_arad_port: port@XBAR_ARAD { reg = ; @@ -1520,7 +1608,7 @@ reg = ; mixer_out1_cif_ep: endpoint { - remote-endpoint = <&xbar_mixer_in1_ep>; + remote-endpoint = <&xbar_mixer_out1_ep>; }; }; @@ -1528,7 +1616,7 @@ reg = ; mixer_out2_cif_ep: endpoint { - remote-endpoint = <&xbar_mixer_in2_ep>; + remote-endpoint = <&xbar_mixer_out2_ep>; }; }; @@ -1536,7 +1624,7 @@ reg = ; mixer_out3_cif_ep: endpoint { - remote-endpoint = <&xbar_mixer_in3_ep>; + remote-endpoint = <&xbar_mixer_out3_ep>; }; }; @@ -1544,15 +1632,15 @@ reg = ; mixer_out4_cif_ep: endpoint { - remote-endpoint = <&xbar_mixer_in4_ep>; + remote-endpoint = <&xbar_mixer_out4_ep>; }; }; mixer_out5_port: port@MIXER_OUT5 { reg = ; - mixer_out_cif_ep: endpoint { - remote-endpoint = <&xbar_mixer_in5_ep>; + mixer_out5_cif_ep: endpoint { + remote-endpoint = <&xbar_mixer_out5_ep>; }; }; }; @@ -2502,7 +2590,7 @@ reg = ; asrc_out1_cif_ep: endpoint { - remote-endpoint = <&xbar_asrc_in1_ep>; + remote-endpoint = <&xbar_asrc_out1_ep>; }; }; @@ -2510,7 +2598,7 @@ reg = ; asrc_out2_cif_ep: endpoint { - remote-endpoint = <&xbar_asrc_in2_ep>; + remote-endpoint = <&xbar_asrc_out2_ep>; }; }; @@ -2518,7 +2606,7 @@ reg = ; asrc_out3_cif_ep: endpoint { - remote-endpoint = <&xbar_asrc_in3_ep>; + remote-endpoint = <&xbar_asrc_out3_ep>; }; }; @@ -2526,7 +2614,7 @@ reg = ; asrc_out4_cif_ep: endpoint { - remote-endpoint = <&xbar_asrc_in4_ep>; + remote-endpoint = <&xbar_asrc_out4_ep>; }; }; @@ -2534,7 +2622,7 @@ reg = ; asrc_out5_cif_ep: endpoint { - remote-endpoint = <&xbar_asrc_in5_ep>; + remote-endpoint = <&xbar_asrc_out5_ep>; }; }; @@ -2542,7 +2630,7 @@ reg = ; asrc_out6_cif_ep: endpoint { - remote-endpoint = <&xbar_asrc_in6_ep>; + remote-endpoint = <&xbar_asrc_out6_ep>; }; }; }; diff --git 
a/hardware/nvidia/platform/tegra/common/kernel-dts/audio/tegra210-audio-dai-links.dtsi b/hardware/nvidia/platform/tegra/common/kernel-dts/audio/tegra210-audio-dai-links.dtsi index 914f10d65f..9f0f61f7dd 100644 --- a/hardware/nvidia/platform/tegra/common/kernel-dts/audio/tegra210-audio-dai-links.dtsi +++ b/hardware/nvidia/platform/tegra/common/kernel-dts/audio/tegra210-audio-dai-links.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * along with this program. If not, see . */ -#include +#include #define ADMAIF_FIFO(i) (TEGRA210_ADMAIF_FIFO_OFFSET + i - 1) #define ADMAIF_CIF(i) (TEGRA210_ADMAIF_CIF_OFFSET + i - 1) @@ -901,7 +901,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF1>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN1>; }; codec { sound-dai = <&tegra_amixer MIXER_IN1>; @@ -913,7 +913,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF2>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN2>; }; codec { sound-dai = <&tegra_amixer MIXER_IN2>; @@ -925,7 +925,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF3>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN3>; }; codec { sound-dai = <&tegra_amixer MIXER_IN3>; @@ -937,7 +937,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF4>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN4>; }; codec { sound-dai = <&tegra_amixer MIXER_IN4>; @@ -949,7 +949,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF5>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN5>; }; codec { sound-dai = <&tegra_amixer MIXER_IN5>; @@ -961,7 +961,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF6>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN6>; }; codec { sound-dai = <&tegra_amixer MIXER_IN6>; @@ -973,7 +973,7 @@ 
link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF7>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN7>; }; codec { sound-dai = <&tegra_amixer MIXER_IN7>; @@ -985,7 +985,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF8>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN8>; }; codec { sound-dai = <&tegra_amixer MIXER_IN8>; @@ -997,7 +997,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF9>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN9>; }; codec { sound-dai = <&tegra_amixer MIXER_IN9>; @@ -1009,7 +1009,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MIXER_IF10>; + sound-dai = <&tegra_axbar XBAR_MIXER_IN10>; }; codec { sound-dai = <&tegra_amixer MIXER_IN10>; @@ -1024,7 +1024,7 @@ sound-dai = <&tegra_amixer MIXER_OUT1>; }; codec { - sound-dai = <&tegra_axbar XBAR_MIXER_IF1>; + sound-dai = <&tegra_axbar XBAR_MIXER_OUT1>; }; }; @@ -1036,7 +1036,7 @@ sound-dai = <&tegra_amixer MIXER_OUT2>; }; codec { - sound-dai = <&tegra_axbar XBAR_MIXER_IF2>; + sound-dai = <&tegra_axbar XBAR_MIXER_OUT2>; }; }; @@ -1048,7 +1048,7 @@ sound-dai = <&tegra_amixer MIXER_OUT3>; }; codec { - sound-dai = <&tegra_axbar XBAR_MIXER_IF3>; + sound-dai = <&tegra_axbar XBAR_MIXER_OUT3>; }; }; @@ -1060,7 +1060,7 @@ sound-dai = <&tegra_amixer MIXER_OUT4>; }; codec { - sound-dai = <&tegra_axbar XBAR_MIXER_IF4>; + sound-dai = <&tegra_axbar XBAR_MIXER_OUT4>; }; }; @@ -1072,7 +1072,7 @@ sound-dai = <&tegra_amixer MIXER_OUT5>; }; codec { - sound-dai = <&tegra_axbar XBAR_MIXER_IF5>; + sound-dai = <&tegra_axbar XBAR_MIXER_OUT5>; }; }; @@ -1081,7 +1081,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_SFC1>; + sound-dai = <&tegra_axbar XBAR_SFC1_RX>; }; codec { sound-dai = <&tegra_sfc1 SFC_IN>; @@ -1094,7 +1094,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_SFC2>; + sound-dai = <&tegra_axbar XBAR_SFC2_RX>; }; codec { sound-dai = <&tegra_sfc2 SFC_IN>; @@ -1107,7 +1107,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_SFC3>; + sound-dai = 
<&tegra_axbar XBAR_SFC3_RX>; }; codec { sound-dai = <&tegra_sfc3 SFC_IN>; @@ -1120,7 +1120,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_SFC4>; + sound-dai = <&tegra_axbar XBAR_SFC4_RX>; }; codec { sound-dai = <&tegra_sfc4 SFC_IN>; @@ -1136,7 +1136,7 @@ sound-dai = <&tegra_sfc1 SFC_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_SFC1>; + sound-dai = <&tegra_axbar XBAR_SFC1_TX>; }; }; @@ -1148,7 +1148,7 @@ sound-dai = <&tegra_sfc2 SFC_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_SFC2>; + sound-dai = <&tegra_axbar XBAR_SFC2_TX>; }; }; @@ -1160,7 +1160,7 @@ sound-dai = <&tegra_sfc3 SFC_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_SFC3>; + sound-dai = <&tegra_axbar XBAR_SFC3_TX>; }; }; @@ -1172,7 +1172,7 @@ sound-dai = <&tegra_sfc4 SFC_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_SFC4>; + sound-dai = <&tegra_axbar XBAR_SFC4_TX>; }; }; @@ -1331,7 +1331,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MVC1>; + sound-dai = <&tegra_axbar XBAR_MVC1_RX>; }; codec { sound-dai = <&tegra_mvc1 MVC_IN>; @@ -1344,7 +1344,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_MVC2>; + sound-dai = <&tegra_axbar XBAR_MVC2_RX>; }; codec { sound-dai = <&tegra_mvc2 MVC_IN>; @@ -1360,7 +1360,7 @@ sound-dai = <&tegra_mvc1 MVC_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_MVC1>; + sound-dai = <&tegra_axbar XBAR_MVC1_TX>; }; }; @@ -1372,7 +1372,7 @@ sound-dai = <&tegra_mvc2 MVC_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_MVC2>; + sound-dai = <&tegra_axbar XBAR_MVC2_TX>; }; }; @@ -1381,7 +1381,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_OPE1>; + sound-dai = <&tegra_axbar XBAR_OPE1_RX>; }; codec { sound-dai = <&tegra_ope1 OPE_IN>; @@ -1394,7 +1394,7 @@ link-type = ; cpu { - sound-dai = <&tegra_axbar XBAR_OPE2>; + sound-dai = <&tegra_axbar XBAR_OPE2_RX>; }; codec { sound-dai = <&tegra_ope2 OPE_IN>; @@ -1410,7 +1410,7 @@ sound-dai = <&tegra_ope1 OPE_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_OPE1>; + sound-dai = 
<&tegra_axbar XBAR_OPE1_TX>; }; }; @@ -1422,7 +1422,7 @@ sound-dai = <&tegra_ope2 OPE_OUT>; }; codec { - sound-dai = <&tegra_axbar XBAR_OPE2>; + sound-dai = <&tegra_axbar XBAR_OPE2_TX>; }; }; }; diff --git a/hardware/nvidia/soc/t19x/kernel-dts/tegra194-soc/tegra194-soc-audio.dtsi b/hardware/nvidia/soc/t19x/kernel-dts/tegra194-soc/tegra194-soc-audio.dtsi index aea963ea9a..4da5c39756 100644 --- a/hardware/nvidia/soc/t19x/kernel-dts/tegra194-soc/tegra194-soc-audio.dtsi +++ b/hardware/nvidia/soc/t19x/kernel-dts/tegra194-soc/tegra194-soc-audio.dtsi @@ -285,6 +285,7 @@ reg = <0x0 0x290bb00 0x0 0x800>; nvidia,ahub-amixer-id = <0>; #sound-dai-cells = <1>; + sound-name-prefix = "MIXER1"; status = "disabled"; }; diff --git a/hardware/nvidia/soc/t19x/kernel-dts/tegra194-soc/tegra194-soc-base.dtsi b/hardware/nvidia/soc/t19x/kernel-dts/tegra194-soc/tegra194-soc-base.dtsi index d15d07292f..00783d6a9a 100644 --- a/hardware/nvidia/soc/t19x/kernel-dts/tegra194-soc/tegra194-soc-base.dtsi +++ b/hardware/nvidia/soc/t19x/kernel-dts/tegra194-soc/tegra194-soc-base.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -2114,7 +2114,7 @@ <&bpmp_clks TEGRA194_CLK_NAFLL_CVNAS>; clock-names = "emc", "nafll_dla", "nafll_dla_falcon", "nafll_pva_vps", "nafll_pva_core", "nafll_cvnas"; - status = "okay"; + status = "disabled"; }; external-connection { diff --git a/hardware/nvidia/soc/t19x/kernel-dts/tegra194-soc/tegra194-soc-eqos.dtsi b/hardware/nvidia/soc/t19x/kernel-dts/tegra194-soc/tegra194-soc-eqos.dtsi index f0a6b55847..7f2709a0f9 100644 --- a/hardware/nvidia/soc/t19x/kernel-dts/tegra194-soc/tegra194-soc-eqos.dtsi +++ b/hardware/nvidia/soc/t19x/kernel-dts/tegra194-soc/tegra194-soc-eqos.dtsi @@ -65,7 +65,7 @@ ethernet@2490000 { compatible = "nvidia,nveqos"; reg = <0x0 0x02490000 0x0 0x10000>; /* EQOS Base Register */ - reg-names = "mac-base"; + reg-names = "mac"; interrupts = <0 194 0x4>, /* common */ <0 186 0x4>, /* tx0 */ <0 187 0x4>, /* tx1 */ @@ -87,10 +87,10 @@ #if TEGRA_IOMMU_DT_VERSION >= DT_VERSION_2 interconnects = <&mc TEGRA194_MEMORY_CLIENT_EQOSR>, <&mc TEGRA194_MEMORY_CLIENT_EQOSW>; - interconnect-names = "dma-mem", "dma-mem"; + interconnect-names = "dma-mem", "write"; #endif - reset-names = "mac_rst"; + reset-names = "mac"; nvidia,promisc_mode = <1>; nvidia,num-dma-chans = <4>; nvidia,dma-chans = <0 1 2 3>; diff --git a/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-audio.dtsi b/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-audio.dtsi index 6c62fe7789..0d8742a9a3 100644 --- a/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-audio.dtsi +++ b/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-audio.dtsi @@ -274,6 +274,7 @@ reg = <0x0 0x290bb00 0x0 0x800>; nvidia,ahub-amixer-id = <0>; #sound-dai-cells = <1>; + sound-name-prefix = "MIXER1"; status = "disabled"; }; diff --git a/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-base.dtsi 
b/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-base.dtsi index 32f5e8bb1c..1cf75f2773 100644 --- a/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-base.dtsi +++ b/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-base.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -911,7 +911,7 @@ compatible = "nvidia,nvpmodel"; clocks = <&bpmp_clks TEGRA234_CLK_EMC>; clock-names = "emc"; - status = "okay"; + status = "disabled"; }; nvdisplay: display@13800000 { diff --git a/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-eqos.dtsi b/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-eqos.dtsi index d498913030..244dd15773 100644 --- a/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-eqos.dtsi +++ b/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-eqos.dtsi @@ -67,7 +67,7 @@ reg = <0x0 0x02310000 0x0 0x10000>, /* EQOS Base Register */ <0x0 0x023D0000 0x0 0x10000>, /* MACSEC Base Register */ <0x0 0x02300000 0x0 0x10000>; /* HV Base Register */ - reg-names = "mac-base", "macsec-base", "hv-base"; + reg-names = "mac", "macsec-base", "hypervisor"; interrupts = <0 194 0x4>, /* common */ <0 186 0x4>, /* vm0 */ <0 187 0x4>, /* vm1 */ @@ -79,7 +79,7 @@ "macsec-ns-irq", "macsec-s-irq"; resets = <&bpmp_resets TEGRA234_RESET_EQOS>, <&bpmp_resets TEGRA234_RESET_EQOS_MACSEC>; /* MACsec non-secure reset */ - reset-names = "mac_rst", "macsec_ns_rst"; + reset-names = "mac", "macsec_ns_rst"; clocks = <&bpmp_clks TEGRA234_CLK_PLLREFE_VCOOUT>, <&bpmp_clks TEGRA234_CLK_EQOS_AXI>, <&bpmp_clks TEGRA234_CLK_EQOS_RX>, @@ -99,7 +99,7 @@ #if TEGRA_IOMMU_DT_VERSION >= DT_VERSION_2 interconnects = <&mc TEGRA234_MEMORY_CLIENT_EQOSR>, <&mc 
TEGRA234_MEMORY_CLIENT_EQOSW>; - interconnect-names = "dma-mem", "dma-mem"; + interconnect-names = "dma-mem", "write"; #endif iommus = <&smmu_niso1 TEGRA_SID_NISO1_EQOS>; nvidia,num-dma-chans = <8>; @@ -130,5 +130,7 @@ pinctrl-0 = <&eqos_mii_rx_input_state_disable>; pinctrl-1 = <&eqos_mii_rx_input_state_enable>; dma-coherent; + nvidia,dma_rx_ring_sz = <1024>; + nvidia,dma_tx_ring_sz = <1024>; }; }; diff --git a/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-mgbe.dtsi b/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-mgbe.dtsi index 2162db067d..b4e36dfc77 100644 --- a/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-mgbe.dtsi +++ b/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-mgbe.dtsi @@ -54,7 +54,7 @@ <0x0 0x068A0000 0x0 0x10000>, /* XPCS base */ <0x0 0x068D0000 0x0 0x10000>, /* MACsec RM base */ <0x0 0x06800000 0x0 0x10000>; /* HV base */ - reg-names = "mac-base", "xpcs-base", "macsec-base", "hv-base"; + reg-names = "mac", "xpcs", "macsec-base", "hypervisor"; interrupts = <0 384 0x4>, /* common */ <0 385 0x4>, /* vm0 */ <0 386 0x4>, /* vm1 */ @@ -68,7 +68,7 @@ resets = <&bpmp_resets TEGRA234_RESET_MGBE0_MAC>, <&bpmp_resets TEGRA234_RESET_MGBE0_PCS>, <&bpmp_resets TEGRA234_RESET_MGBE0_MACSEC>; /* MACsec non-secure reset */ - reset-names = "mac_rst", "xpcs_rst", "macsec_ns_rst"; + reset-names = "mac", "pcs", "macsec_ns_rst"; clocks = <&bpmp_clks TEGRA234_CLK_MGBE0_RX_INPUT_M>, <&bpmp_clks TEGRA234_CLK_MGBE0_RX_PCS_M>, <&bpmp_clks TEGRA234_CLK_MGBE0_RX_PCS_INPUT>, @@ -82,14 +82,14 @@ <&bpmp_clks TEGRA234_CLK_MGBE0_PTP_REF>, <&bpmp_clks TEGRA234_CLK_MGBE0_MACSEC>, <&bpmp_clks TEGRA234_CLK_MGBE0_RX_INPUT>; - clock-names = "rx_input_m", "rx_pcs_m", "rx_pcs_input", - "rx_pcs", "tx", "tx_pcs", "mac_divider", - "mac", "eee_pcs", "app", "ptp_ref", - "mgbe_macsec", "rx_input"; + clock-names = "rx-input-m", "rx-pcs-m", "rx-pcs-input", + "rx-pcs", "tx", "tx-pcs", "mac-divider", + "mac", "eee-pcs", "mgbe", "ptp-ref", + 
"mgbe_macsec", "rx-input"; #if TEGRA_IOMMU_DT_VERSION >= DT_VERSION_2 interconnects = <&mc TEGRA234_MEMORY_CLIENT_MGBEARD>, <&mc TEGRA234_MEMORY_CLIENT_MGBEAWR>; - interconnect-names = "dma-mem", "dma-mem"; + interconnect-names = "dma-mem", "write"; #endif iommus = <&smmu_niso0 TEGRA_SID_NISO0_MGBE>; power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEBA>; @@ -117,6 +117,8 @@ nvidia,instance_id = <0>; /* MGBE0 instance */ nvidia,ptp-rx-queue = <3>; dma-coherent; + nvidia,dma_rx_ring_sz = <4096>; + nvidia,dma_tx_ring_sz = <4096>; }; ethernet@6910000 { @@ -125,7 +127,7 @@ <0x0 0x069A0000 0x0 0x10000>, /* XPCS base */ <0x0 0x069D0000 0x0 0x10000>, /* MACsec RM base */ <0x0 0x06900000 0x0 0x10000>; /* HV base */ - reg-names = "mac-base", "xpcs-base", "macsec-base", "hv-base"; + reg-names = "mac", "xpcs", "macsec-base", "hypervisor"; interrupts = <0 392 0x4>, /* common */ <0 393 0x4>, /* vm0 */ <0 394 0x4>, /* vm1 */ @@ -139,7 +141,7 @@ resets = <&bpmp_resets TEGRA234_RESET_MGBE1_MAC>, <&bpmp_resets TEGRA234_RESET_MGBE1_PCS>, <&bpmp_resets TEGRA234_RESET_MGBE1_MACSEC>; /* MACsec non-secure reset */ - reset-names = "mac_rst", "xpcs_rst", "macsec_ns_rst"; + reset-names = "mac", "pcs", "macsec_ns_rst"; clocks = <&bpmp_clks TEGRA234_CLK_MGBE1_RX_INPUT_M>, <&bpmp_clks TEGRA234_CLK_MGBE1_RX_PCS_M>, <&bpmp_clks TEGRA234_CLK_MGBE1_RX_PCS_INPUT>, @@ -153,14 +155,14 @@ <&bpmp_clks TEGRA234_CLK_MGBE1_PTP_REF>, <&bpmp_clks TEGRA234_CLK_MGBE1_MACSEC>, <&bpmp_clks TEGRA234_CLK_MGBE1_RX_INPUT>; - clock-names = "rx_input_m", "rx_pcs_m", "rx_pcs_input", - "rx_pcs", "tx", "tx_pcs", "mac_divider", - "mac", "eee_pcs", "app", "ptp_ref", - "mgbe_macsec", "rx_input"; + clock-names = "rx-input-m", "rx-pcs-m", "rx-pcs-input", + "rx-pcs", "tx", "tx-pcs", "mac-divider", + "mac", "eee-pcs", "mgbe", "ptp-ref", + "mgbe_macsec", "rx-input"; #if TEGRA_IOMMU_DT_VERSION >= DT_VERSION_2 interconnects = <&mc TEGRA234_MEMORY_CLIENT_MGBEBRD>, <&mc TEGRA234_MEMORY_CLIENT_MGBEBWR>; - interconnect-names = 
"dma-mem", "dma-mem"; + interconnect-names = "dma-mem", "write"; #endif iommus = <&smmu_niso0 TEGRA_SID_NISO0_MGBE_VF1>; power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEBB>; @@ -188,6 +190,8 @@ nvidia,instance_id = <1>; /* MGBE1 instance */ nvidia,ptp-rx-queue = <3>; dma-coherent; + nvidia,dma_rx_ring_sz = <4096>; + nvidia,dma_tx_ring_sz = <4096>; }; ethernet@6A10000 { @@ -196,7 +200,7 @@ <0x0 0x06AA0000 0x0 0x10000>, /* XPCS base */ <0x0 0x06AD0000 0x0 0x10000>, /* MACsec RM base */ <0x0 0x06A00000 0x0 0x10000>; /* HV base */ - reg-names = "mac-base", "xpcs-base", "macsec-base", "hv-base"; + reg-names = "mac", "xpcs", "macsec-base", "hypervisor"; interrupts = <0 400 0x4>, /* common */ <0 401 0x4>, /* vm0 */ <0 402 0x4>, /* vm1 */ @@ -210,7 +214,7 @@ resets = <&bpmp_resets TEGRA234_RESET_MGBE2_MAC>, <&bpmp_resets TEGRA234_RESET_MGBE2_PCS>, <&bpmp_resets TEGRA234_RESET_MGBE2_MACSEC>; /* MACsec non-secure reset */ - reset-names = "mac_rst", "xpcs_rst", "macsec_ns_rst"; + reset-names = "mac", "pcs", "macsec_ns_rst"; clocks = <&bpmp_clks TEGRA234_CLK_MGBE2_RX_INPUT_M>, <&bpmp_clks TEGRA234_CLK_MGBE2_RX_PCS_M>, <&bpmp_clks TEGRA234_CLK_MGBE2_RX_PCS_INPUT>, @@ -224,14 +228,14 @@ <&bpmp_clks TEGRA234_CLK_MGBE2_PTP_REF>, <&bpmp_clks TEGRA234_CLK_MGBE2_MACSEC>, <&bpmp_clks TEGRA234_CLK_MGBE2_RX_INPUT>; - clock-names = "rx_input_m", "rx_pcs_m", "rx_pcs_input", - "rx_pcs", "tx", "tx_pcs", "mac_divider", - "mac", "eee_pcs", "app", "ptp_ref", - "mgbe_macsec", "rx_input"; + clock-names = "rx-input-m", "rx-pcs-m", "rx-pcs-input", + "rx-pcs", "tx", "tx-pcs", "mac-divider", + "mac", "eee-pcs", "mgbe", "ptp-ref", + "mgbe_macsec", "rx-input"; #if TEGRA_IOMMU_DT_VERSION >= DT_VERSION_2 interconnects = <&mc TEGRA234_MEMORY_CLIENT_MGBECRD>, <&mc TEGRA234_MEMORY_CLIENT_MGBECWR>; - interconnect-names = "dma-mem", "dma-mem"; + interconnect-names = "dma-mem", "write"; #endif iommus = <&smmu_niso0 TEGRA_SID_NISO0_MGBE_VF2>; power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEBC>; @@ -259,6 
+263,8 @@ nvidia,instance_id = <2>; /* MGBE2 instance */ nvidia,ptp-rx-queue = <3>; dma-coherent; + nvidia,dma_rx_ring_sz = <4096>; + nvidia,dma_tx_ring_sz = <4096>; }; ethernet@6B10000 { @@ -267,7 +273,7 @@ <0x0 0x06BA0000 0x0 0x10000>, /* XPCS base */ <0x0 0x06BD0000 0x0 0x10000>, /* MACsec RM base */ <0x0 0x06B00000 0x0 0x10000>; /* HV base */ - reg-names = "mac-base", "xpcs-base", "macsec-base", "hv-base"; + reg-names = "mac", "xpcs", "macsec-base", "hypervisor"; interrupts = <0 408 0x4>, /* common */ <0 409 0x4>, /* vm0 */ <0 410 0x4>, /* vm1 */ @@ -281,7 +287,7 @@ resets = <&bpmp_resets TEGRA234_RESET_MGBE3_MAC>, <&bpmp_resets TEGRA234_RESET_MGBE3_PCS>, <&bpmp_resets TEGRA234_RESET_MGBE3_MACSEC>; /* MACsec non-secure reset */ - reset-names = "mac_rst", "xpcs_rst", "macsec_ns_rst"; + reset-names = "mac", "pcs", "macsec_ns_rst"; clocks = <&bpmp_clks TEGRA234_CLK_MGBE3_RX_INPUT_M>, <&bpmp_clks TEGRA234_CLK_MGBE3_RX_PCS_M>, <&bpmp_clks TEGRA234_CLK_MGBE3_RX_PCS_INPUT>, @@ -295,14 +301,14 @@ <&bpmp_clks TEGRA234_CLK_MGBE3_PTP_REF>, <&bpmp_clks TEGRA234_CLK_MGBE3_MACSEC>, <&bpmp_clks TEGRA234_CLK_MGBE3_RX_INPUT>; - clock-names = "rx_input_m", "rx_pcs_m", "rx_pcs_input", - "rx_pcs", "tx", "tx_pcs", "mac_divider", - "mac", "eee_pcs", "app", "ptp_ref", - "mgbe_macsec", "rx_input"; + clock-names = "rx-input-m", "rx-pcs-m", "rx-pcs-input", + "rx-pcs", "tx", "tx-pcs", "mac-divider", + "mac", "eee-pcs", "mgbe", "ptp-ref", + "mgbe_macsec", "rx-input"; #if TEGRA_IOMMU_DT_VERSION >= DT_VERSION_2 interconnects = <&mc TEGRA234_MEMORY_CLIENT_MGBEDRD>, <&mc TEGRA234_MEMORY_CLIENT_MGBEDWR>; - interconnect-names = "dma-mem", "dma-mem"; + interconnect-names = "dma-mem", "write"; #endif iommus = <&smmu_niso0 TEGRA_SID_NISO0_MGBE_VF3>; power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEA>; @@ -330,5 +336,7 @@ nvidia,instance_id = <3>; /* MGBE3 instance */ nvidia,ptp-rx-queue = <3>; dma-coherent; + nvidia,dma_rx_ring_sz = <4096>; + nvidia,dma_tx_ring_sz = <4096>; }; }; diff --git 
a/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-minimal.dtsi b/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-minimal.dtsi index 6420df3e09..f3f0c36fee 100644 --- a/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-minimal.dtsi +++ b/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-minimal.dtsi @@ -42,6 +42,8 @@ interrupt-controller; reg = <0x0 0x0f400000 0x0 0x00010000 /* GICD */ 0x0 0x0f440000 0x0 0x00200000>; /* GICR CPU 0-15 */ + interrupt-parent = <&intc>; + interrupts = ; ranges; status = "disabled"; diff --git a/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-pcie.dtsi b/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-pcie.dtsi index 86de3ee312..e6d234b242 100644 --- a/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-pcie.dtsi +++ b/hardware/nvidia/soc/t23x/kernel-dts/tegra234-soc/tegra234-soc-pcie.dtsi @@ -444,10 +444,10 @@ interrupts = <0 53 0x04>; /* controller interrupt */ interrupt-names = "intr"; - nvidia,dvfs-tbl = < 204000000 204000000 204000000 408000000 - 204000000 204000000 408000000 666000000 - 204000000 408000000 666000000 1066000000 - 408000000 666000000 1066000000 2133000000 >; + nvidia,dvfs-tbl = < 204000000 204000000 204000000 204000000 + 204000000 204000000 204000000 665600000 + 204000000 204000000 665600000 1600000000 + 204000000 665600000 1600000000 2133000000 >; nvidia,host1x = <&host1x>; nvidia,enable-ext-refclk; @@ -521,9 +521,9 @@ interrupts = <0 352 0x04>; /* controller interrupt */ interrupt-names = "intr"; - nvidia,dvfs-tbl = < 204000000 204000000 204000000 408000000 - 204000000 204000000 408000000 800000000 - 204000000 408000000 800000000 1600000000 + nvidia,dvfs-tbl = < 204000000 204000000 204000000 665600000 + 204000000 204000000 665600000 1600000000 + 204000000 665600000 1600000000 2133000000 0 0 0 0 >; nvidia,enable-ext-refclk; @@ -596,10 +596,10 @@ interrupts = <0 354 0x04>; /* controller interrupt */ interrupt-names = "intr"; - 
nvidia,dvfs-tbl = < 204000000 204000000 204000000 408000000 - 204000000 204000000 408000000 666000000 - 204000000 408000000 666000000 1066000000 - 408000000 666000000 1066000000 2133000000 >; + nvidia,dvfs-tbl = < 204000000 204000000 204000000 204000000 + 204000000 204000000 204000000 665600000 + 204000000 204000000 665600000 1600000000 + 204000000 665600000 1600000000 2133000000 >; nvidia,enable-ext-refclk; nvidia,max-speed = <4>; @@ -672,9 +672,9 @@ interrupts = <0 360 0x04>; /* controller interrupt */ interrupt-names = "intr"; - nvidia,dvfs-tbl = < 204000000 204000000 204000000 408000000 - 204000000 204000000 408000000 800000000 - 204000000 408000000 800000000 1600000000 + nvidia,dvfs-tbl = < 204000000 204000000 204000000 665600000 + 204000000 204000000 665600000 1600000000 + 204000000 665600000 1600000000 2133000000 0 0 0 0 >; nvidia,enable-ext-refclk; @@ -764,9 +764,9 @@ interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 0 72 0x04>; - nvidia,dvfs-tbl = < 204000000 204000000 204000000 408000000 - 204000000 204000000 408000000 800000000 - 204000000 408000000 800000000 1600000000 + nvidia,dvfs-tbl = < 204000000 204000000 204000000 665600000 + 204000000 204000000 665600000 1600000000 + 204000000 665600000 1600000000 2133000000 0 0 0 0 >; nvidia,max-speed = <4>; @@ -842,7 +842,7 @@ interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 0 45 0x04>; - nvidia,dvfs-tbl = < 204000000 408000000 800000000 1333000000 + nvidia,dvfs-tbl = < 204000000 665600000 665600000 1600000000 0 0 0 0 0 0 0 0 0 0 0 0 >; @@ -922,7 +922,7 @@ interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 0 47 0x04>; - nvidia,dvfs-tbl = < 204000000 408000000 800000000 1333000000 + nvidia,dvfs-tbl = < 204000000 665600000 665600000 1600000000 0 0 0 0 0 0 0 0 0 0 0 0 >; @@ -1001,7 +1001,7 @@ interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 0 49 0x04>; - nvidia,dvfs-tbl = < 204000000 408000000 800000000 1333000000 + nvidia,dvfs-tbl = < 204000000 665600000 
665600000 1600000000 0 0 0 0 0 0 0 0 0 0 0 0 >; @@ -1080,9 +1080,9 @@ interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 0 51 0x04>; - nvidia,dvfs-tbl = < 204000000 204000000 204000000 408000000 - 204000000 204000000 408000000 800000000 - 204000000 408000000 800000000 1600000000 + nvidia,dvfs-tbl = < 204000000 204000000 204000000 665600000 + 204000000 204000000 665600000 1600000000 + 204000000 665600000 1600000000 2133000000 0 0 0 0 >; nvidia,max-speed = <4>; @@ -1159,10 +1159,10 @@ interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 0 53 0x04>; - nvidia,dvfs-tbl = < 204000000 204000000 204000000 408000000 - 204000000 204000000 408000000 666000000 - 204000000 408000000 666000000 1066000000 - 408000000 666000000 1066000000 2133000000 >; + nvidia,dvfs-tbl = < 204000000 204000000 204000000 204000000 + 204000000 204000000 204000000 665600000 + 204000000 204000000 665600000 1600000000 + 204000000 665600000 1600000000 2133000000 >; nvidia,max-speed = <4>; nvidia,disable-aspm-states = <0xf>; @@ -1239,9 +1239,9 @@ interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 0 352 0x04>; - nvidia,dvfs-tbl = < 204000000 204000000 204000000 408000000 - 204000000 204000000 408000000 800000000 - 204000000 408000000 800000000 1600000000 + nvidia,dvfs-tbl = < 204000000 204000000 204000000 665600000 + 204000000 204000000 665600000 1600000000 + 204000000 665600000 1600000000 2133000000 0 0 0 0 >; nvidia,max-speed = <4>; @@ -1319,10 +1319,10 @@ interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 0 354 0x04>; - nvidia,dvfs-tbl = < 204000000 204000000 204000000 408000000 - 204000000 204000000 408000000 666000000 - 204000000 408000000 666000000 1066000000 - 408000000 666000000 1066000000 2133000000 >; + nvidia,dvfs-tbl = < 204000000 204000000 204000000 204000000 + 204000000 204000000 204000000 665600000 + 204000000 204000000 665600000 1600000000 + 204000000 665600000 1600000000 2133000000 >; nvidia,max-speed = <4>; nvidia,disable-aspm-states = <0xf>; 
@@ -1353,7 +1353,7 @@ nvidia,ptm-cap-off = <0x318>; }; - /* C8 X4 */ + /* C8 X2 */ pcie_c8_rp: pcie@140a0000 { compatible = "nvidia,tegra234-pcie", "snps,dw-pcie"; power-domains = <&bpmp TEGRA234_POWER_DOMAIN_PCIEX4CA>; @@ -1369,7 +1369,7 @@ #address-cells = <3>; #size-cells = <2>; device_type = "pci"; - num-lanes = <4>; + num-lanes = <2>; num-viewport = <8>; linux,pci-domain = <8>; @@ -1399,9 +1399,9 @@ interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 0 356 0x04>; - nvidia,dvfs-tbl = < 204000000 204000000 204000000 408000000 - 204000000 204000000 408000000 800000000 - 204000000 408000000 800000000 1600000000 + nvidia,dvfs-tbl = < 204000000 204000000 665600000 1600000000 + 204000000 665600000 1600000000 2133000000 + 0 0 0 0 0 0 0 0 >; nvidia,max-speed = <4>; @@ -1433,7 +1433,7 @@ nvidia,ptm-cap-off = <0x304>; }; - /* C9 X4 */ + /* C9 X2 */ pcie_c9_rp: pcie@140c0000 { compatible = "nvidia,tegra234-pcie", "snps,dw-pcie"; power-domains = <&bpmp TEGRA234_POWER_DOMAIN_PCIEX4CB>; @@ -1449,7 +1449,7 @@ #address-cells = <3>; #size-cells = <2>; device_type = "pci"; - num-lanes = <4>; + num-lanes = <2>; num-viewport = <8>; linux,pci-domain = <9>; @@ -1479,9 +1479,9 @@ interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 0 358 0x04>; - nvidia,dvfs-tbl = < 204000000 204000000 204000000 408000000 - 204000000 204000000 408000000 800000000 - 204000000 408000000 800000000 1600000000 + nvidia,dvfs-tbl = < 204000000 204000000 665600000 1600000000 + 204000000 665600000 1600000000 2133000000 + 0 0 0 0 0 0 0 0 >; nvidia,max-speed = <4>; @@ -1559,9 +1559,9 @@ interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &intc 0 360 0x04>; - nvidia,dvfs-tbl = < 204000000 204000000 204000000 408000000 - 204000000 204000000 408000000 800000000 - 204000000 408000000 800000000 1600000000 + nvidia,dvfs-tbl = < 204000000 204000000 204000000 665600000 + 204000000 204000000 665600000 1600000000 + 204000000 665600000 1600000000 2133000000 0 0 0 0 >; nvidia,max-speed = <4>; diff 
--git a/hardware/nvidia/soc/tegra/kernel-include/dt-bindings/sound/tegra-asoc-dais.h b/hardware/nvidia/soc/tegra/kernel-include/dt-bindings/sound/tegra-asoc-dais.h index 00448b751f..ac1828cb73 100644 --- a/hardware/nvidia/soc/tegra/kernel-include/dt-bindings/sound/tegra-asoc-dais.h +++ b/hardware/nvidia/soc/tegra/kernel-include/dt-bindings/sound/tegra-asoc-dais.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -39,7 +39,6 @@ * in the drivers. */ -/* common XBAR dais */ #define XBAR_ADMAIF1 0 #define XBAR_ADMAIF2 1 #define XBAR_ADMAIF3 2 @@ -50,109 +49,148 @@ #define XBAR_ADMAIF8 7 #define XBAR_ADMAIF9 8 #define XBAR_ADMAIF10 9 -#define XBAR_I2S1 10 -#define XBAR_I2S2 11 -#define XBAR_I2S3 12 -#define XBAR_I2S4 13 -#define XBAR_I2S5 14 -#define XBAR_SFC1 15 -#define XBAR_SFC2 16 -#define XBAR_SFC3 17 -#define XBAR_SFC4 18 -#define XBAR_MIXER_IF1 19 -#define XBAR_MIXER_IF2 20 -#define XBAR_MIXER_IF3 21 -#define XBAR_MIXER_IF4 22 -#define XBAR_MIXER_IF5 23 -#define XBAR_MIXER_IF6 24 -#define XBAR_MIXER_IF7 25 -#define XBAR_MIXER_IF8 26 -#define XBAR_MIXER_IF9 27 -#define XBAR_MIXER_IF10 28 -#define XBAR_AFC1 29 -#define XBAR_AFC2 30 -#define XBAR_AFC3 31 -#define XBAR_AFC4 32 -#define XBAR_AFC5 33 -#define XBAR_AFC6 34 -#define XBAR_OPE1 35 -#define XBAR_SPKPROT 36 -#define XBAR_MVC1 37 -#define XBAR_MVC2 38 -#define XBAR_IQC1_1 39 -#define XBAR_IQC1_2 40 -#define XBAR_IQC2_1 41 -#define XBAR_IQC2_2 42 -#define XBAR_DMIC1 43 -#define XBAR_DMIC2 44 -#define XBAR_DMIC3 45 -#define XBAR_AMX1_OUT 46 -#define XBAR_AMX1_IN1 47 -#define XBAR_AMX1_IN2 48 -#define XBAR_AMX1_IN3 49 -#define XBAR_AMX1_IN4 50 -#define XBAR_AMX2_OUT 51 -#define XBAR_AMX2_IN1 52 -#define XBAR_AMX2_IN2 53 -#define XBAR_AMX2_IN3 54 -#define 
XBAR_AMX2_IN4 55 -#define XBAR_ADX1_OUT1 56 -#define XBAR_ADX1_OUT2 57 -#define XBAR_ADX1_OUT3 58 -#define XBAR_ADX1_OUT4 59 -#define XBAR_ADX1_IN 60 -#define XBAR_ADX2_OUT1 61 -#define XBAR_ADX2_OUT2 62 -#define XBAR_ADX2_OUT3 63 -#define XBAR_ADX2_OUT4 64 -#define XBAR_ADX2_IN 65 -/* Tegra210 specific XBAR DAIs */ -#define XBAR_OPE2 66 -/* Tegra186 specific XBAR DAIs */ -#define XBAR_ADMAIF11 66 -#define XBAR_ADMAIF12 67 -#define XBAR_ADMAIF13 68 -#define XBAR_ADMAIF14 69 -#define XBAR_ADMAIF15 70 -#define XBAR_ADMAIF16 71 -#define XBAR_ADMAIF17 72 -#define XBAR_ADMAIF18 73 -#define XBAR_ADMAIF19 74 -#define XBAR_ADMAIF20 75 -#define XBAR_I2S6 76 -#define XBAR_AMX3_OUT 77 -#define XBAR_AMX3_IN1 78 -#define XBAR_AMX3_IN2 79 -#define XBAR_AMX3_IN3 80 -#define XBAR_AMX3_IN4 81 -#define XBAR_AMX4_OUT 82 -#define XBAR_AMX4_IN1 83 -#define XBAR_AMX4_IN2 84 -#define XBAR_AMX4_IN3 85 -#define XBAR_AMX4_IN4 86 -#define XBAR_ADX3_OUT1 87 -#define XBAR_ADX3_OUT2 88 -#define XBAR_ADX3_OUT3 89 -#define XBAR_ADX3_OUT4 90 -#define XBAR_ADX3_IN 91 -#define XBAR_ADX4_OUT1 92 -#define XBAR_ADX4_OUT2 93 -#define XBAR_ADX4_OUT3 94 -#define XBAR_ADX4_OUT4 95 -#define XBAR_ADX4_IN 96 -#define XBAR_DMIC4 97 -#define XBAR_ASRC_IF1 98 -#define XBAR_ASRC_IF2 99 -#define XBAR_ASRC_IF3 100 -#define XBAR_ASRC_IF4 101 -#define XBAR_ASRC_IF5 102 -#define XBAR_ASRC_IF6 103 -#define XBAR_ASRC_IF7 104 -#define XBAR_ARAD 105 -#define XBAR_DSPK1 106 -#define XBAR_DSPK2 107 +#define XBAR_ADMAIF11 10 +#define XBAR_ADMAIF12 11 +#define XBAR_ADMAIF13 12 +#define XBAR_ADMAIF14 13 +#define XBAR_ADMAIF15 14 +#define XBAR_ADMAIF16 15 +#define XBAR_ADMAIF17 16 +#define XBAR_ADMAIF18 17 +#define XBAR_ADMAIF19 18 +#define XBAR_ADMAIF20 19 +#define XBAR_I2S1 20 +#define XBAR_I2S2 21 +#define XBAR_I2S3 22 +#define XBAR_I2S4 23 +#define XBAR_I2S5 24 +#define XBAR_I2S6 25 +#define XBAR_DMIC1 26 +#define XBAR_DMIC2 27 +#define XBAR_DMIC3 28 +#define XBAR_DMIC4 29 +#define XBAR_DSPK1 30 +#define XBAR_DSPK2 31 
+#define XBAR_SFC1_RX 32 -/* common ADMAIF DAIs */ +/* + * TODO As per downstream kernel code there will be routing issue + * if DAI names are updated for SFC, MVC and OPE input and + * output. Due to that using single DAI with same name as downstream + * kernel for input and output and added output DAIs just to keep + * similar to upstream kernel, so that it will be easy to upstream + * later. + * + * Once the routing changes are done for above mentioned modules, + * use the commented output dai index and define output dai + * links in tegra186-audio-graph.dtsi + */ +#if 0 +#define XBAR_SFC1_TX 33 +#define XBAR_SFC2_TX 35 +#define XBAR_SFC3_TX 37 +#define XBAR_SFC4_TX 39 +#define XBAR_MVC1_TX 41 +#define XBAR_MVC2_TX 43 +#define XBAR_OPE1_TX 113 +#else +#define XBAR_SFC1_TX XBAR_SFC1_RX +#define XBAR_SFC2_TX XBAR_SFC2_RX +#define XBAR_SFC3_TX XBAR_SFC3_RX +#define XBAR_SFC4_TX XBAR_SFC4_RX +#define XBAR_MVC1_TX XBAR_MVC1_RX +#define XBAR_MVC2_TX XBAR_MVC2_RX +#define XBAR_OPE1_TX XBAR_OPE1_RX +#endif + +#define XBAR_SFC2_RX 34 +#define XBAR_SFC3_RX 36 +#define XBAR_SFC4_RX 38 +#define XBAR_MVC1_RX 40 +#define XBAR_MVC2_RX 42 +#define XBAR_AMX1_IN1 44 +#define XBAR_AMX1_IN2 45 +#define XBAR_AMX1_IN3 46 +#define XBAR_AMX1_IN4 47 +#define XBAR_AMX1_OUT 48 +#define XBAR_AMX2_IN1 49 +#define XBAR_AMX2_IN2 50 +#define XBAR_AMX2_IN3 51 +#define XBAR_AMX2_IN4 52 +#define XBAR_AMX2_OUT 53 +#define XBAR_AMX3_IN1 54 +#define XBAR_AMX3_IN2 55 +#define XBAR_AMX3_IN3 56 +#define XBAR_AMX3_IN4 57 +#define XBAR_AMX3_OUT 58 +#define XBAR_AMX4_IN1 59 +#define XBAR_AMX4_IN2 60 +#define XBAR_AMX4_IN3 61 +#define XBAR_AMX4_IN4 62 +#define XBAR_AMX4_OUT 63 +#define XBAR_ADX1_IN 64 +#define XBAR_ADX1_OUT1 65 +#define XBAR_ADX1_OUT2 66 +#define XBAR_ADX1_OUT3 67 +#define XBAR_ADX1_OUT4 68 +#define XBAR_ADX2_IN 69 +#define XBAR_ADX2_OUT1 70 +#define XBAR_ADX2_OUT2 71 +#define XBAR_ADX2_OUT3 72 +#define XBAR_ADX2_OUT4 73 +#define XBAR_ADX3_IN 74 +#define XBAR_ADX3_OUT1 75 +#define 
XBAR_ADX3_OUT2 76 +#define XBAR_ADX3_OUT3 77 +#define XBAR_ADX3_OUT4 78 +#define XBAR_ADX4_IN 79 +#define XBAR_ADX4_OUT1 80 +#define XBAR_ADX4_OUT2 81 +#define XBAR_ADX4_OUT3 82 +#define XBAR_ADX4_OUT4 83 +#define XBAR_MIXER_IN1 84 +#define XBAR_MIXER_IN2 85 +#define XBAR_MIXER_IN3 86 +#define XBAR_MIXER_IN4 87 +#define XBAR_MIXER_IN5 88 +#define XBAR_MIXER_IN6 89 +#define XBAR_MIXER_IN7 90 +#define XBAR_MIXER_IN8 91 +#define XBAR_MIXER_IN9 92 +#define XBAR_MIXER_IN10 93 +#define XBAR_MIXER_OUT1 94 +#define XBAR_MIXER_OUT2 95 +#define XBAR_MIXER_OUT3 96 +#define XBAR_MIXER_OUT4 97 +#define XBAR_MIXER_OUT5 98 +#define XBAR_ASRC_IN1 99 +#define XBAR_ASRC_OUT1 100 +#define XBAR_ASRC_IN2 101 +#define XBAR_ASRC_OUT2 102 +#define XBAR_ASRC_IN3 103 +#define XBAR_ASRC_OUT3 104 +#define XBAR_ASRC_IN4 105 +#define XBAR_ASRC_OUT4 106 +#define XBAR_ASRC_IN5 107 +#define XBAR_ASRC_OUT5 108 +#define XBAR_ASRC_IN6 109 +#define XBAR_ASRC_OUT6 110 +#define XBAR_ASRC_IN7 111 +#define XBAR_OPE1_RX 112 +#define XBAR_AFC1 114 +#define XBAR_AFC2 115 +#define XBAR_AFC3 116 +#define XBAR_AFC4 117 +#define XBAR_AFC5 118 +#define XBAR_AFC6 119 +#define XBAR_SPKPROT 120 +#define XBAR_IQC1_1 121 +#define XBAR_IQC1_2 122 +#define XBAR_IQC2_1 123 +#define XBAR_IQC2_2 124 +#define XBAR_ARAD 125 + +/* ADMAIF DAIs */ #define ADMAIF1 0 #define ADMAIF2 1 #define ADMAIF3 2 @@ -163,7 +201,6 @@ #define ADMAIF8 7 #define ADMAIF9 8 #define ADMAIF10 9 -/* Tegra186 specific ADMAIF DAIs */ #define ADMAIF11 10 #define ADMAIF12 11 #define ADMAIF13 12 @@ -180,7 +217,6 @@ * Offset depends on the number of ADMAIF channels for a chip. * The DAI indices for these are derived from below offsets. */ -#define TEGRA210_ADMAIF_FIFO_OFFSET 10 #define TEGRA186_ADMAIF_FIFO_OFFSET 20 /* @@ -188,7 +224,6 @@ * Offset depends on the number of ADMAIF channels for a chip. * The DAI indices for these are derived from below offsets. 
*/ -#define TEGRA210_ADMAIF_CIF_OFFSET 20 #define TEGRA186_ADMAIF_CIF_OFFSET 40 /* I2S */ diff --git a/hardware/nvidia/soc/tegra/kernel-include/dt-bindings/sound/tegra210-asoc-dais.h b/hardware/nvidia/soc/tegra/kernel-include/dt-bindings/sound/tegra210-asoc-dais.h new file mode 100644 index 0000000000..40da4874b3 --- /dev/null +++ b/hardware/nvidia/soc/tegra/kernel-include/dt-bindings/sound/tegra210-asoc-dais.h @@ -0,0 +1,311 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* + * Author: Sameer Pujar + * + * This header provides macros for Tegra 210 ASoC sound bindings. + */ + +#ifndef __DT_TEGRA210_ASOC_DAIS_H +#define __DT_TEGRA210_ASOC_DAIS_H + +/* + * DAI links can have one of these value + * PCM_LINK : optional, if nothing is specified link is treated as PCM link + * COMPR_LINK : required, if link is used with compress device + * C2C_LINK : required, for any other back end codec-to-codec links + */ +#define PCM_LINK 0 +#define COMPR_LINK 1 +#define C2C_LINK 2 + +/* + * Following DAI indices are derived from respective module drivers. + * Thus below values have to be in sync with the DAI arrays defined + * in the drivers. 
+ */ + +/* XBAR dais */ +#define XBAR_ADMAIF1 0 +#define XBAR_ADMAIF2 1 +#define XBAR_ADMAIF3 2 +#define XBAR_ADMAIF4 3 +#define XBAR_ADMAIF5 4 +#define XBAR_ADMAIF6 5 +#define XBAR_ADMAIF7 6 +#define XBAR_ADMAIF8 7 +#define XBAR_ADMAIF9 8 +#define XBAR_ADMAIF10 9 +#define XBAR_I2S1 10 +#define XBAR_I2S2 11 +#define XBAR_I2S3 12 +#define XBAR_I2S4 13 +#define XBAR_I2S5 14 +#define XBAR_DMIC1 15 +#define XBAR_DMIC2 16 +#define XBAR_DMIC3 17 +#define XBAR_SFC1_RX 18 + +/* + * TODO As per downstream kernel code there will be routing issue + * if DAI names are updated for SFC, MVC and OPE input and + * output. Due to that using single DAI with same name as downstream + * kernel for input and output and added output DAIs just to keep + * similar to upstream kernel, so that it will be easy to upstream + * later. + * + * Once the routing changes are done for above mentioned modules, + * use the commented output dai index and define output dai + * links in tegra186-audio-graph.dtsi + */ +#if 0 +#define XBAR_SFC1_TX 19 +#define XBAR_SFC2_TX 21 +#define XBAR_SFC3_TX 23 +#define XBAR_SFC4_TX 25 +#define XBAR_MVC1_TX 27 +#define XBAR_MVC2_TX 29 +#define XBAR_OPE1_TX 66 +#define XBAR_OPE2_TX 68 +#else +#define XBAR_SFC1_TX XBAR_SFC1_RX +#define XBAR_SFC2_TX XBAR_SFC2_RX +#define XBAR_SFC3_TX XBAR_SFC3_RX +#define XBAR_SFC4_TX XBAR_SFC4_RX +#define XBAR_MVC1_TX XBAR_MVC1_RX +#define XBAR_MVC2_TX XBAR_MVC2_RX +#define XBAR_OPE1_TX XBAR_OPE1_RX +#define XBAR_OPE2_TX XBAR_OPE2_RX +#endif + +#define XBAR_SFC2_RX 20 +#define XBAR_SFC3_RX 22 +#define XBAR_SFC4_RX 24 +#define XBAR_MVC1_RX 26 +#define XBAR_MVC2_RX 28 +#define XBAR_AMX1_IN1 30 +#define XBAR_AMX1_IN2 31 +#define XBAR_AMX1_IN3 32 +#define XBAR_AMX1_IN4 33 +#define XBAR_AMX1_OUT 34 +#define XBAR_AMX2_IN1 35 +#define XBAR_AMX2_IN2 36 +#define XBAR_AMX2_IN3 37 +#define XBAR_AMX2_IN4 38 +#define XBAR_AMX2_OUT 39 +#define XBAR_ADX1_IN 40 +#define XBAR_ADX1_OUT1 41 +#define XBAR_ADX1_OUT2 42 +#define XBAR_ADX1_OUT3 43 +#define 
XBAR_ADX1_OUT4 44 +#define XBAR_ADX2_IN 45 +#define XBAR_ADX2_OUT1 46 +#define XBAR_ADX2_OUT2 47 +#define XBAR_ADX2_OUT3 48 +#define XBAR_ADX2_OUT4 49 +#define XBAR_MIXER_IN1 50 +#define XBAR_MIXER_IN2 51 +#define XBAR_MIXER_IN3 52 +#define XBAR_MIXER_IN4 53 +#define XBAR_MIXER_IN5 54 +#define XBAR_MIXER_IN6 55 +#define XBAR_MIXER_IN7 56 +#define XBAR_MIXER_IN8 57 +#define XBAR_MIXER_IN9 58 +#define XBAR_MIXER_IN10 59 +#define XBAR_MIXER_OUT1 60 +#define XBAR_MIXER_OUT2 61 +#define XBAR_MIXER_OUT3 62 +#define XBAR_MIXER_OUT4 63 +#define XBAR_MIXER_OUT5 64 +#define XBAR_OPE1_RX 65 +#define XBAR_OPE2_RX 67 +#define XBAR_AFC1 69 +#define XBAR_AFC2 70 +#define XBAR_AFC3 71 +#define XBAR_AFC4 72 +#define XBAR_AFC5 73 +#define XBAR_AFC6 74 +#define XBAR_SPKPROT 75 +#define XBAR_IQC1_1 76 +#define XBAR_IQC1_2 77 +#define XBAR_IQC2_1 78 +#define XBAR_IQC2_2 79 + +/* common ADMAIF DAIs */ +#define ADMAIF1 0 +#define ADMAIF2 1 +#define ADMAIF3 2 +#define ADMAIF4 3 +#define ADMAIF5 4 +#define ADMAIF6 5 +#define ADMAIF7 6 +#define ADMAIF8 7 +#define ADMAIF9 8 +#define ADMAIF10 9 + +/* + * ADMAIF_FIFO: DAIs used for DAI links between ADMAIF and ADSP. + * Offset depends on the number of ADMAIF channels for a chip. + * The DAI indices for these are derived from below offsets. + */ +#define TEGRA210_ADMAIF_FIFO_OFFSET 10 + +/* + * ADMAIF_CIF: DAIs used for codec-to-codec links between ADMAIF and XBAR. + * Offset depends on the number of ADMAIF channels for a chip. + * The DAI indices for these are derived from below offsets. 
+ */ +#define TEGRA210_ADMAIF_CIF_OFFSET 20 + +/* I2S */ +#define I2S_CIF 0 +#define I2S_DAP 1 +#define I2S_DUMMY 2 + +/* DMIC */ +#define DMIC_CIF 0 +#define DMIC_DAP 1 +#define DMIC_DUMMY 2 + +/* DSPK */ +#define DSPK_CIF 0 +#define DSPK_DAP 1 +#define DSPK_DUMMY 2 + +/* SFC */ +#define SFC_IN 0 +#define SFC_OUT 1 + +/* MIXER */ +#define MIXER_IN1 0 +#define MIXER_IN2 1 +#define MIXER_IN3 2 +#define MIXER_IN4 3 +#define MIXER_IN5 4 +#define MIXER_IN6 5 +#define MIXER_IN7 6 +#define MIXER_IN8 7 +#define MIXER_IN9 8 +#define MIXER_IN10 9 +#define MIXER_OUT1 10 +#define MIXER_OUT2 11 +#define MIXER_OUT3 12 +#define MIXER_OUT4 13 +#define MIXER_OUT5 14 + +/* AFC */ +#define AFC_IN 0 +#define AFC_OUT 1 + +/* OPE */ +#define OPE_IN 0 +#define OPE_OUT 1 + +/* MVC */ +#define MVC_IN 0 +#define MVC_OUT 1 + +/* AMX */ +#define AMX_IN1 0 +#define AMX_IN2 1 +#define AMX_IN3 2 +#define AMX_IN4 3 +#define AMX_OUT 4 + +/* ADX */ +#define ADX_OUT1 0 +#define ADX_OUT2 1 +#define ADX_OUT3 2 +#define ADX_OUT4 3 +#define ADX_IN 4 + +/* ASRC */ +#define ASRC_IN1 0 +#define ASRC_IN2 1 +#define ASRC_IN3 2 +#define ASRC_IN4 3 +#define ASRC_IN5 4 +#define ASRC_IN6 5 +#define ASRC_IN7 6 +#define ASRC_OUT1 7 +#define ASRC_OUT2 8 +#define ASRC_OUT3 9 +#define ASRC_OUT4 10 +#define ASRC_OUT5 11 +#define ASRC_OUT6 12 + +/* ARAD */ +#define ARAD 0 + +/* ADSP */ +#define ADSP_FE1 0 +#define ADSP_FE2 1 +#define ADSP_FE3 2 +#define ADSP_FE4 3 +#define ADSP_FE5 4 +#define ADSP_FE6 5 +#define ADSP_FE7 6 +#define ADSP_FE8 7 +#define ADSP_FE9 8 +#define ADSP_FE10 9 +#define ADSP_FE11 10 +#define ADSP_FE12 11 +#define ADSP_FE13 12 +#define ADSP_FE14 13 +#define ADSP_FE15 14 +#define ADSP_EAVB_CODEC 15 +#define ADSP_ADMAIF1 16 +#define ADSP_ADMAIF2 17 +#define ADSP_ADMAIF3 18 +#define ADSP_ADMAIF4 19 +#define ADSP_ADMAIF5 20 +#define ADSP_ADMAIF6 21 +#define ADSP_ADMAIF7 22 +#define ADSP_ADMAIF8 23 +#define ADSP_ADMAIF9 24 +#define ADSP_ADMAIF10 25 +#define ADSP_ADMAIF11 26 +#define ADSP_ADMAIF12 27 
+#define ADSP_ADMAIF13 28 +#define ADSP_ADMAIF14 29 +#define ADSP_ADMAIF15 30 +#define ADSP_ADMAIF16 31 +#define ADSP_ADMAIF17 32 +#define ADSP_ADMAIF18 33 +#define ADSP_ADMAIF19 34 +#define ADSP_ADMAIF20 35 +#define ADSP_PCM1 36 +#define ADSP_PCM2 37 +#define ADSP_PCM3 38 +#define ADSP_PCM4 39 +#define ADSP_PCM5 40 +#define ADSP_PCM6 41 +#define ADSP_PCM7 42 +#define ADSP_PCM8 43 +#define ADSP_PCM9 44 +#define ADSP_PCM10 45 +#define ADSP_PCM11 46 +#define ADSP_PCM12 47 +#define ADSP_PCM13 48 +#define ADSP_PCM14 49 +#define ADSP_PCM15 50 +#define ADSP_COMPR1 51 +#define ADSP_COMPR2 52 +#define ADSP_EAVB 53 + +#endif diff --git a/hardware/nvidia/soc/tegra/kernel-include/dt-common/jetson/tegra234-p3737-0000-p3701-0000.h b/hardware/nvidia/soc/tegra/kernel-include/dt-common/jetson/tegra234-p3737-0000-p3701-0000.h index 90b64636f6..d4b74e1682 100644 --- a/hardware/nvidia/soc/tegra/kernel-include/dt-common/jetson/tegra234-p3737-0000-p3701-0000.h +++ b/hardware/nvidia/soc/tegra/kernel-include/dt-common/jetson/tegra234-p3737-0000-p3701-0000.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -20,7 +20,7 @@ #include -#define JETSON_COMPATIBLE "nvidia,p3737-0000+p3701-0000", "nvidia,p3737-0000+p3701-0004" +#define JETSON_COMPATIBLE "nvidia,p3737-0000+p3701-0000", "nvidia,p3737-0000+p3701-0004", "nvidia,p3737-0000+p3701-0005" /* SoC function name for clock signal on 40-pin header pin 7 */ #define HDR40_CLK "extperiph4" diff --git a/hardware/nvidia/soc/tegra/kernel-include/dt-common/jetson/tegra234-p3767-0000-common.h b/hardware/nvidia/soc/tegra/kernel-include/dt-common/jetson/tegra234-p3767-0000-common.h index fc75a4cafc..92e61c4639 100644 --- a/hardware/nvidia/soc/tegra/kernel-include/dt-common/jetson/tegra234-p3767-0000-common.h +++ b/hardware/nvidia/soc/tegra/kernel-include/dt-common/jetson/tegra234-p3767-0000-common.h @@ -16,7 +16,21 @@ #include -#define JETSON_COMPATIBLE "nvidia,p3509-0000+p3767-0000", "nvidia,p3768-0000+p3767-0000" +#define JETSON_COMPATIBLE_P3768 "nvidia,p3768-0000+p3767-0000", \ + "nvidia,p3768-0000+p3767-0001", \ + "nvidia,p3768-0000+p3767-0003", \ + "nvidia,p3768-0000+p3767-0004", \ + "nvidia,p3768-0000+p3767-0005" + +#define JETSON_COMPATIBLE_P3509 "nvidia,p3509-0000+p3767-0000", \ + "nvidia,p3509-0000+p3767-0001", \ + "nvidia,p3509-0000+p3767-0003", \ + "nvidia,p3509-0000+p3767-0004", \ + "nvidia,p3509-0000+p3767-0005" + +#define JETSON_COMPATIBLE JETSON_COMPATIBLE_P3768, \ + JETSON_COMPATIBLE_P3509 + /* SoC function name for clock signal on 40-pin header pin 7 */ #define HDR40_CLK "aud" /* SoC function name for I2S interface on 40-pin header pins 12, 35, 38 and 40 */ diff --git a/kernel/avt/drivers/media/i2c/avt_csi2.h b/kernel/avt/drivers/media/i2c/avt_csi2.h index 67da23c5ac..9a751b9692 100644 --- a/kernel/avt/drivers/media/i2c/avt_csi2.h +++ b/kernel/avt/drivers/media/i2c/avt_csi2.h @@ -164,7 +164,7 @@ struct avt_ctrl { /* Driver release version */ #define DRV_VER_MAJOR 5 
#define DRV_VER_MINOR 1 -#define DRV_VER_PATCH 0 +#define DRV_VER_PATCH 1 #define DRV_VER_BUILD 0 #define DRIVER_VERSION STR(DRV_VER_MAJOR) "." STR(DRV_VER_MINOR) "." STR(DRV_VER_PATCH) "." STR(DRV_VER_BUILD) diff --git a/kernel/kernel-5.10/NVIDIA-REVIEWERS b/kernel/kernel-5.10/NVIDIA-REVIEWERS deleted file mode 100644 index 9bda0aae18..0000000000 --- a/kernel/kernel-5.10/NVIDIA-REVIEWERS +++ /dev/null @@ -1,440 +0,0 @@ -This is a reviewers file that can be parsed by get_nv_reviewers.py that -internally uses scripts/get_maintainer.pl from kernel. - -See the MAINTAINERS file in the Linux kernel source tree for details of the -file format. The file format is defined by the upstream Linux kernel community, -so don't modify it without upstreaming any changes to get_maintainer.pl. - -Descriptions of section entries (copied from MAINTAINERS): - - P: Person (obsolete) - M: Mail patches to: FullName - L: Mailing list that is relevant to this area - B: NvBugs Module Name - W: Web-page with status/info - Q: Patchwork web based patch tracking system site - T: SCM tree type and location. Type is one of: git, hg, quilt, stgit, topgit. - S: Status, one of the following: - Supported: Someone is actually paid to look after this. - Maintained: Someone actually looks after it. - Odd Fixes: It has a maintainer but they don't have time to do - much other than throw the odd patch in. See below.. - Orphan: No current maintainer [but maybe you could take the - role as you write your new code]. - Obsolete: Old code. Something tagged obsolete generally means - it has been replaced by a better system and you - should be using that. - F: Files and directories with wildcard patterns. - A trailing slash includes all files and subdirectory files. - F: drivers/net/ all files in and below drivers/net - F: drivers/net/* all files in drivers/net, but not below - F: */net/* all files in "any top level directory"/net - One pattern per line. Multiple F: lines acceptable. 
- X: Files and directories that are NOT maintained, same rules as F: - Files exclusions are tested before file matches. - Can be useful for excluding a specific subdirectory, for instance: - F: net/ - X: net/ipv6/ - matches all files in and below net excluding net/ipv6/ - K: Keyword perl extended regex pattern to match content in a - patch or file. For instance: - K: of_get_profile - matches patches or files that contain "of_get_profile" - K: \b(printk|pr_(info|err))\b - matches patches or files that contain one or more of the words - printk, pr_info or pr_err - One regex pattern per line. Multiple K: lines acceptable. - -Note: For the hard of thinking, this list is meant to remain in alphabetical -order. If you could add yourselves to it in alphabetical order that would be -so much easier [Ed] - -Maintainers List (try to look for most precise areas first) - ----------------------------------------------------------------------- - -ARM64 (AARCH64 ARCHITECTURE) -M: Bo Yan -M: Alexander Van Brunt -L: sw-mobile-cpu@exchange.nvidia.com -B: Mobile_Android_Kernel -S: Supported -F: arch/arm64/ -F: Documentation/arm64/ -X: arch/arm64/mach-tegra/ -X: arch/arm64/include/asm/mach/ -X: arch/arm64/configs/ - -ANDROID -M: Sachin Nikam -B: Mobile_Android_Kernel -F: drivers/android/ - -AUDIO -M: Niranjan Wartikar -M: Sharad Gupta -M: Jonathan Hunter -L: sw-mobile-audio-arch@exchange.nvidia.com -B: Mobile_Audio -S: Supported -F: sound/ -F: drivers/dma/tegra210-adma.c -X: sound/soc/tegra-virt-alt/* - -BPMP -M: Timo Alho -M: Sivaram Nair -L: sw-mobile-bpmp-dev@exchange.nvidia.com -B: Mobile_BPMP -S: Supported -F: drivers/firmware/tegra/* - -BUS -M: Laxman Dewangan -L: sw-mobile-kernel-bus@exchange.nvidia.com -B: Mobile_Android_IO_Peripherals -S: Supported -F: drivers/base/regmap/* -F: drivers/i2c/* -F: drivers/i2c/busses/* -F: drivers/misc/inter-tegra/* -F: drivers/mfd/* -F: drivers/rtc/* -F: drivers/spi/* -F: drivers/soc/tegra/* -F: drivers/tty/serial/* -F: 
drivers/platform/tegra/tegra_prod.c - -CAMERA -M: Sudhir Vyas -M: Bhanu Murthy -M: Songhee Baek -L: sw-mobile-camera@exchange.nvidia.com -B: Camera_Core -S: Supported -F: drivers/media/platform/vivid/ -F: drivers/media/usb/uvc/* -F: drivers/media/v4l2-core/* - -COMMS -M: Krishna Thota -M: Ashutosh Jha -L: sw-mobile-comms-hdc@exchange.nvidia.com -B: Mobile_Android_Comms_Connectivity -S: Supported -F: drivers/net/ -F: include/linux/netdevice.h -F: include/net/* -F: net/ -F: drivers/tty/serial/ublox6-gps* - -CPU -M: Alexander Van Brunt -M: Bo Yan -L: sw-mobile-cpu@exchange.nvidia.com -B: Mobile_Android_Kernel -S: Supported -F: drivers/clocksource/* -F: drivers/irqchip/* -F: drivers/perf/arm_pmu.c -F: drivers/misc/tegra_timerinfo.c -F: drivers/staging/android/fiq_debugger/* -F: include/asm-generic/* -X: drivers/platform/tegra/powergate/ - -DISPLAY -M: Mitch Luban -M: Venu Byravarasu -M: Ujwal Patel -M: Emma Yan -L: tegra-display-core@exchange.nvidia.com -B: Mobile_DDK_Display -S: Supported -F: drivers/video/backlight/backlight.c -F: drivers/video/backlight/max8831_bl.c -F: drivers/video/backlight/pwm_bl.c -F: drivers/video/backlight/tegra_pwm_bl.c -F: drivers/video/backlight/ds1050_bl.c - -DEVICE-TREE -M: Laxman Dewangan -M: Alexander Van Brunt -L: sw-mobile-platform@exchange.nvidia.com -B: Mobile_Android_IO_Peripherals -S: Supported -F: arch/arm64/boot/dts/ -F: drivers/of/ - -INPUT -M: Jordan Nien -B: Mobile_Android_Kernel -S: Supported -F: drivers/input/* - -IVC: -M: Dennis Kou -M: Peter Newman -M: Sivaram Nair -M: Vladislav Buzov -L: sw-tegra-ivc-dev@nvidia.com -B: Embedded Virtualization -S: Supported -F: drivers/platform/tegra/tegra-ivc.c -F: drivers/virt/tegra/ivc-cdev.c -F: drivers/virt/tegra/ivcbench_cli.c -F: include/linux/tegra-ivc.h -F: include/linux/tegra-ivc-instance.h - -KERNEL-CORE -M: Bharat Nihalani -M: Yu-Huan Hsu -L: sw-mobile-kernel@exchange.nvidia.com -B: Mobile_Android_Kernel -S: Supported -F: drivers/base/* -F: drivers/devfreq/ -F: 
drivers/staging/iio/* -F: include/linux/blk-cgroup.h -F: include/linux/fixp-arith.h -F: include/linux/genhd.h -F: include/linux/log2.h -F: include/trace/* -F: ipc/* -F: kernel/ -F: lib/ - -MEMORY -M: Sachin Nikam -M: Krishna Reddy -M: Pritesh Raithatha -M: Puneet Saxena -L: sw-mobile-memory@exchange.nvidia.com -B: Mobile_Android_Kernel -S: Supported -F: arch/arm64/mm/* -F: drivers/base/cacheinfo.c -F: drivers/base/dma-coherent.c -F: drivers/base/dma-contiguous.c -F: drivers/of/of_reserved_mem.c -F: drivers/dma/* -F: drivers/dma-buf/* -F: drivers/iommu/* -F: drivers/md/* -F: drivers/memory/* -F: drivers/staging/android/ion/* -F: include/linux/mm.h -F: include/linux/nvmap.h -F: include/linux/vmstat.h -F: include/tracing/events/nvmap.h -F: include/tracing/events/dmadebug.h -F: mm/* -F: drivers/dt-bindings/memory/ - -NVPMODEL -M: Terry Wang -M: Aaron Huang -L: terwang@nvidia.com -B: Mobile_Android_Power -S: Supported -F: drivers/nvpmodel/ - -PCI -M: Krishna Thota -M: Vidya Sagar -L: sw-mobile-pcie@exchange.nvidia.com -B: Mobile_Kernel_PCIe -S: Supported -F: drivers/nvme/host/pci.c -F: drivers/pci/host/pci-tegra.c -F: drivers/pci/ - -PLATFORM -M: Laxman Dewangan -L: sw-mobile-kernel-platform@exchange.nvidia.com -B: Mobile_Android_IO_Peripherals -S: Supported -F: drivers/gpio/* -F: drivers/hwmon/* -F: drivers/pinctrl/* - -PLATFORM POWER -M: Laxman Dewangan -L: sw-mobile-pmic@exchange.nvidia.com -B: Mobile_Android_IO_PMIC -S: Supported -F: drivers/power/ -F: drivers/power/reset -F: drivers/regulator/ -F: drivers/extcon/ - -POWER: CLOCKS AND VOLTAGE -M: Aleksandr Frid -M: Peter De Schrijver -L: sw-mobile-clocks@exchange.nvidia.com -B: Mobile_Android_Kernel -S: Supported -F: drivers/base/power/* -F: drivers/clk/ - -POWER: CPU -M: Sai Gurrappadi -M: Bo Yan -M: Krishna Sitaraman -M: Antti Miettinen " -L: sw-mobile-cpu-power@exchange.nvidia.com -B: Mobile_Android_Kernel -S: Supported -F: drivers/cpufreq/ -F: drivers/cpuidle/ -F: drivers/misc/cpuload.c - -POWER: THERMAL -M: 
Srikar Srimath Tirumala -L: sw-mobile-therm@exchange.nvidia.com -B: Mobile_Android_Kernel -S: Supported -F: drivers/misc/therm*_est.c -F: drivers/thermal/ - -POWER: EDP AND FRIENDS -M: Jonathan Hunter -L: mobile-sys-edp@exchange.nvidia.com -B: Mobile_Linux_Kernel -S: Supported -F: drivers/edp/ -F: drivers/misc/tegra_ppm.c -F: drivers/soc/tegra/*edp* -F: include/linux/tegra_ppm.h - -PROFILING -M: Mark Peters -M: Yifei Wan -B: Mobile_Android_Tools -S: Supported -F: drivers/hwtracing/* -F: drivers/perf/arm_pmu.c - -PROFILER -M: Allen Martin -S: Supported -F: drivers/misc/tegra-profiler/ -F: include/linux/tegra_profiler.h - -RT_PATCHES -M: Igor Nabirushkin -B: Mobile_Android_Kernel -S: Supported -F: rt-patches - -SCHED -M: Sai Gurrappadi -M: Bo Yan -L: SW-Linux-CPU-Scheduler@exchange.nvidia.com -B: Mobile_Android_Kernel -S: Supported -F: arch/arm64/include/asm/topology.h -F: arch/arm64/kernel/topology.c -F: include/linux/sched.h -F: include/linux/sched/ -F: kernel/sched/ -F: kernel/sysctl.c -F: kernel/time/tick-sched.c - -SECURE OS -M: Mihir Joshi -L: taekr-staff@exchange.nvidia.com -B: Mobile_Security -S: Supported -F: security/tlk_driver/ - -SECURITY -M: Mallikarjun Kasoju -L: Tegra_kernel_security@exchange.nvidia.com -B: Mobile_Security -S: Supported -F: crypto/* -F: drivers/crypto/* -F: drivers/misc/tegra-cryptodev.c -F: include/crypto/* -F: security/ - -SENSORS -M: Robert Collins -L: sw-mobile-sensors@exchange.nvidia.com -B: Mobile_Android_Motion_Sensors -S: Supported -F: drivers/iio/common/nvs/* -F: drivers/iio/imu/* -F: drivers/iio/light/* -F: drivers/iio/magnetometer/* -F: drivers/misc/nvs/* -F: drivers/misc/nvs-dfsh/* - -HOST INPUT DEVICES -M: Suresh Mangipudi -S: Supported -F: drivers/hid/* - -STORAGE -M: Venu Byravarasu -L: sw-mobile-mmc@exchange.nvidia.com -B: Mobile_Android_IO_Storage -S: Supported -F: block/* -F: drivers/ata/* -F: drivers/mmc/card/* -F: drivers/mmc/host/* -F: drivers/mtd/* -F: drivers/nvme/host/scsi.c -F: drivers/scsi/* -F: 
drivers/target/* -F: include/linux/fs.h -F: fs/ - -SYSTEM -M: Laxman Dewangan -L: sw-mobile-system@exchange.nvidia.com -B: Mobile_Android_IO_Peripheral -S: Supported -F: drivers/dma/* -F: drivers/input/keyboard/* -F: drivers/pwm/* -F: drivers/watchdog/* - -TOUCHSCREEN -M: David Pu -L: sw-mobile-touch@exchange.nvidia.com -B: Mobile Driver Touchscreen -S: Supported -F: drivers/input/touchscreen/* - -USB-EHCI -M: Rama Kandhala -M: Suresh Mangipudi -L: usb3-sweng@exchange.nvidia.com -B: mobile_android_io_usb -S: Supported -F: drivers/misc/tegra-baseband/tegra_usb_modem_power.c -F: drivers/usb/gadget/function/* -F: drivers/usb/serial/* -F: drivers/usb/gadget/tegra_udc.c -F: drivers/usb/host/ehci-tegra.c -F: drivers/usb/phy/tegra*otg.c -F: drivers/usb/phy/tegra*usb_phy.c - -USB-XHCI -M: Ashu Jha -M: ChihMin Cheng -M: Suresh Mangipudi -L: usb3-sweng@exchange.nvidia.com -B: USB3 - Software -S: Supported -F: drivers/mailbox/tegra-xusb-mailbox.c -F: drivers/usb/gadget/function/* -F: drivers/usb/serial/* -F: drivers/usb/gadget/tegra_udc.c -F: drivers/usb/gadget/udc/tegra_xudc.c -F: drivers/usb/gadget/udc/tegra_usb_cd*.c -F: drivers/usb/host/xhci-tegra* -F: drivers/usb/phy/tegra*otg.c -F: drivers/usb/phy/tegra*usb_phy.c diff --git a/kernel/kernel-5.10/arch/arm64/configs/defconfig b/kernel/kernel-5.10/arch/arm64/configs/defconfig index de31fedcae..bda2dff571 100644 --- a/kernel/kernel-5.10/arch/arm64/configs/defconfig +++ b/kernel/kernel-5.10/arch/arm64/configs/defconfig @@ -77,6 +77,7 @@ CONFIG_DMI_SYSFS=y # CONFIG_EFI_VARS_PSTORE is not set CONFIG_EFI_CAPSULE_LOADER=y CONFIG_EFI_TEST=m +CONFIG_FB_EFI=y CONFIG_ACPI=y CONFIG_ACPI_APEI=y CONFIG_ACPI_APEI_GHES=y @@ -320,6 +321,7 @@ CONFIG_PCI_ENDPOINT_CONFIGFS=y CONFIG_PCI_EPF_TEST=y CONFIG_PCIE_EPF_NV_TEST=y CONFIG_PCIE_EPF_TEGRA_VNET=y +CONFIG_PCI_SERIAL_CH384=m CONFIG_UEVENT_HELPER=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y @@ -356,6 +358,7 @@ CONFIG_EEPROM_AT24=m CONFIG_EEPROM_AT25=m CONFIG_CB710_CORE=m CONFIG_MODS=m 
+CONFIG_SENSORS_F75308=m CONFIG_SENSORS_NCT1008=m CONFIG_SENSORS_PEX9749=y CONFIG_FAN_THERM_EST=y @@ -1076,6 +1079,7 @@ CONFIG_HID_SENSOR_HUB=m CONFIG_HID_SENSOR_CUSTOM_SENSOR=m CONFIG_HID_ALPS=m CONFIG_USB_HIDDEV=y +CONFIG_HID_SHIELD_REMOTE=m CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_OTG=y CONFIG_USB_MON=m @@ -1287,6 +1291,7 @@ CONFIG_QFMT_V2=m CONFIG_AUTOFS4_FS=y CONFIG_FUSE_FS=m CONFIG_CUSE=m +CONFIG_ISO9660_FS=y CONFIG_OVERLAY_FS=m # CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set CONFIG_MSDOS_FS=y diff --git a/kernel/kernel-5.10/drivers/hid/hid-ids.h b/kernel/kernel-5.10/drivers/hid/hid-ids.h index 57b1624b58..64d60d9104 100644 --- a/kernel/kernel-5.10/drivers/hid/hid-ids.h +++ b/kernel/kernel-5.10/drivers/hid/hid-ids.h @@ -6,6 +6,7 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. */ /* @@ -941,6 +942,7 @@ #define USB_DEVICE_ID_NVIDIA_JARVIS 0x7212 #define USB_DEVICE_ID_NVIDIA_PEPPER 0x7213 #define USB_DEVICE_ID_NVIDIA_THUNDERSTRIKE 0x7214 +#define USB_DEVICE_ID_NVIDIA_FRIDAY 0x7217 #define USB_DEVICE_ID_NVIDIA_BLAKE 0x7210 #define USB_VENDOR_ID_ONTRAK 0x0a07 diff --git a/kernel/kernel-5.10/drivers/hwmon/gpio-tachometer.c b/kernel/kernel-5.10/drivers/hwmon/gpio-tachometer.c index a8fc834d7d..85b8099f0a 100644 --- a/kernel/kernel-5.10/drivers/hwmon/gpio-tachometer.c +++ b/kernel/kernel-5.10/drivers/hwmon/gpio-tachometer.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -231,11 +231,9 @@ static void tach_measure_work(struct work_struct *workp) tach = gpio_tachd->tach_dev; (void)gpio_tachometer_read_rpm(tach); - if (gpio_tachd->rpm == 0) - orderly_poweroff(true); - else - queue_delayed_work(gpio_tachd->tach_workqueue, - &gpio_tachd->tach_work, gpio_tachd->schedule_delay); + if (gpio_tachd->rpm) + queue_delayed_work(gpio_tachd->tach_workqueue, &gpio_tachd->tach_work, + gpio_tachd->schedule_delay); } static const struct of_device_id gpio_tachometer_of_match[] = { @@ -253,6 +251,7 @@ static int gpio_tachometer_probe(struct platform_device *pdev) struct gpio_tachometer_device *gpio_tachd = NULL; struct device *tach_dev; int ret = 0, err; + int gpio; gpio_tachd = devm_kzalloc(&pdev->dev, sizeof(*gpio_tachd), GFP_KERNEL); if (gpio_tachd == NULL) { @@ -261,9 +260,10 @@ static int gpio_tachometer_probe(struct platform_device *pdev) } mutex_init(&gpio_tachd->lock); - /* If pulse-per-rev node not present or if the value + /* + * If pulse-per-rev node not present or if the value * is less than 1, abort. 
- */ + */ if (of_property_read_u32(np, "pulse-per-rev", &gpio_tachd->pulse_per_rev) || (gpio_tachd->pulse_per_rev < 1)) { @@ -290,51 +290,55 @@ static int gpio_tachometer_probe(struct platform_device *pdev) (gpio_tachd->schedule_delay <= (gpio_tachd->win_len * MIN_SAMPLE_WIN))) gpio_tachd->schedule_delay = 0; - gpio_tachd->tach_gpio = of_get_named_gpio(np, "gpio", 0); + gpio = of_get_named_gpio(np, "gpio", 0); + if (!gpio_is_valid(gpio)) { + if (gpio != -EPROBE_DEFER) + dev_err(dev, "Invalid GPIO, error %d\n", gpio); + return gpio; + } + + gpio_tachd->tach_gpio = gpio; dev_info(dev, "Tachometer GPIO=%d, win-len=%u, schedule_delay=%u\n", gpio_tachd->tach_gpio, gpio_tachd->win_len, gpio_tachd->schedule_delay); - if (gpio_is_valid(gpio_tachd->tach_gpio)) { - err = devm_gpio_request_one(dev, gpio_tachd->tach_gpio, + err = devm_gpio_request_one(dev, gpio_tachd->tach_gpio, GPIOF_IN, "Tach_input"); - if (err < 0) { - dev_err(dev, "%s: Tachometer GPIO request failed for gpio %d: error %d\n", - __func__, gpio_tachd->tach_gpio, err); - return -EINVAL; - } + if (err < 0) { + dev_err(dev, "%s: Tachometer GPIO request failed for gpio %d: error %d\n", + __func__, gpio_tachd->tach_gpio, err); + return -EINVAL; + } - tach_dev = devm_hwmon_device_register_with_groups(dev, "gpiofan", + tach_dev = devm_hwmon_device_register_with_groups(dev, "gpio_tach", gpio_tachd, gpio_tach_groups); - if (IS_ERR(tach_dev)) { - ret = PTR_ERR(tach_dev); - dev_err(dev, "GPIO Tachometer driver init failed, err: %d\n", - ret); - return ret; - } - platform_set_drvdata(pdev, gpio_tachd); - gpio_tachd->tach_dev = tach_dev; - dev_info(dev, "Tachometer driver initialized with pulse_per_rev: %d and win_len: %d\n", + if (IS_ERR(tach_dev)) { + ret = PTR_ERR(tach_dev); + dev_err(dev, "GPIO Tachometer driver init failed, err: %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, gpio_tachd); + gpio_tachd->tach_dev = tach_dev; + dev_info(dev, "Tachometer driver initialized with pulse_per_rev: %d and 
win_len: %d\n", gpio_tachd->pulse_per_rev, gpio_tachd->win_len); - /*If schedule delay is not configured, don't monitor rpm*/ - if (gpio_tachd->schedule_delay == 0) - return ret; - /*Create single thread work queue*/ - gpio_tachd->tach_workqueue = - create_singlethread_workqueue("tach_workqueue"); - INIT_DELAYED_WORK(&gpio_tachd->tach_work, tach_measure_work); - err = queue_delayed_work(gpio_tachd->tach_workqueue, - &gpio_tachd->tach_work, gpio_tachd->schedule_delay); - if (err == 1) - dev_info(dev, "tach measure work submitted successfully\n"); - else { - dev_err(dev, "tach measure work submission failed: %d\n", err); - destroy_workqueue(gpio_tachd->tach_workqueue); - devm_gpio_free(dev, gpio_tachd->tach_gpio); - ret = -EINVAL; - } - } else { - dev_err(dev, "Invalid GPIO\n"); + + /*If schedule delay is not configured, don't monitor rpm*/ + if (gpio_tachd->schedule_delay == 0) + return ret; + + /*Create single thread work queue*/ + gpio_tachd->tach_workqueue = create_singlethread_workqueue("tach_workqueue"); + INIT_DELAYED_WORK(&gpio_tachd->tach_work, tach_measure_work); + err = queue_delayed_work(gpio_tachd->tach_workqueue, &gpio_tachd->tach_work, + gpio_tachd->schedule_delay); + if (err == 1) + dev_info(dev, "tach measure work submitted successfully\n"); + else { + dev_err(dev, "tach measure work submission failed: %d\n", err); + destroy_workqueue(gpio_tachd->tach_workqueue); + devm_gpio_free(dev, gpio_tachd->tach_gpio); ret = -EINVAL; } + return ret; } diff --git a/kernel/kernel-5.10/drivers/mmc/host/sdhci-tegra.c b/kernel/kernel-5.10/drivers/mmc/host/sdhci-tegra.c index 9352e9954c..43cf507f2d 100644 --- a/kernel/kernel-5.10/drivers/mmc/host/sdhci-tegra.c +++ b/kernel/kernel-5.10/drivers/mmc/host/sdhci-tegra.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2010 Google, Inc. - * Copyright (c) 2012-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2012-2023, NVIDIA CORPORATION. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -137,6 +138,8 @@ #define SDHCI_TEGRA_AUTO_CAL_STATUS 0x1ec #define SDHCI_TEGRA_AUTO_CAL_ACTIVE BIT(31) +#define SDHCI_TEGRA_CIF2AXI_CTRL_0 0x1fc + #define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0) #define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1) #define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2) @@ -167,6 +170,8 @@ #define NVQUIRK_HAS_TMCLK BIT(14) #define NVQUIRK_ENABLE_PERIODIC_CALIB BIT(15) #define NVQUIRK_ENABLE_TUNING_DQ_OFFSET BIT(16) +#define NVQUIRK_PROGRAM_MC_STREAMID BIT(17) + #define SDHCI_TEGRA_FALLBACK_CLK_HZ 400000 #define MAX_TAP_VALUE 256 @@ -222,6 +227,7 @@ struct sdhci_tegra_soc_data { u32 nvquirks; u8 min_tap_delay; u8 max_tap_delay; + unsigned int min_host_clk; bool use_bwmgr; }; @@ -304,6 +310,7 @@ struct sdhci_tegra { bool defer_calib; bool wake_enable_failed; bool enable_cqic; + u32 streamid; }; static void sdhci_tegra_debugfs_init(struct sdhci_host *host); @@ -1369,6 +1376,8 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) */ if (!tegra_host->skip_clk_rst) { host_clk = tegra_sdhci_apply_clk_limits(host, clock); + if (host_clk < tegra_host->soc_data->min_host_clk) + host_clk = tegra_host->soc_data->min_host_clk; clk_set_rate(pltfm_host->clk, host_clk); tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk); if (tegra_host->ddr_signaling) @@ -2685,10 +2694,12 @@ static const struct sdhci_tegra_soc_data soc_data_tegra234 = { NVQUIRK_CONTROL_TRIMMER_SUPPLY | NVQUIRK_ENABLE_SDR50 | NVQUIRK_SDMMC_CLK_OVERRIDE | + NVQUIRK_PROGRAM_MC_STREAMID | NVQUIRK_ENABLE_SDR104 | NVQUIRK_HAS_TMCLK, .min_tap_delay = 95, .max_tap_delay = 111, + .min_host_clk = 20000000, .use_bwmgr = false, }; @@ -2812,6 +2823,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev) struct sdhci_pltfm_host *pltfm_host; struct sdhci_tegra 
*tegra_host; struct clk *clk; + struct iommu_fwspec *fwspec; int rc; match = of_match_device(sdhci_tegra_dt_match, &pdev->dev); @@ -3056,6 +3068,23 @@ static int sdhci_tegra_probe(struct platform_device *pdev) } } + /* Program MC streamID for DMA transfers */ + if (soc_data->nvquirks & NVQUIRK_PROGRAM_MC_STREAMID) { + fwspec = dev_iommu_fwspec_get(&pdev->dev); + if (fwspec == NULL) { + rc = -ENODEV; + dev_err(mmc_dev(host->mmc), + "failed to get MC streamid: %d\n", + rc); + goto err_rst_get; + } else { + tegra_host->streamid = fwspec->ids[0] & 0xffff; + tegra_sdhci_writel(host, tegra_host->streamid | + (tegra_host->streamid << 8), + SDHCI_TEGRA_CIF2AXI_CTRL_0); + } + } + tegra_host->is_probe_done = true; schedule_delayed_work(&tegra_host->detect_delay, @@ -3296,6 +3325,13 @@ static int __maybe_unused sdhci_tegra_resume(struct device *dev) if (ret) return ret; + /* Re-program MC streamID for DMA transfers */ + if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_MC_STREAMID) { + tegra_sdhci_writel(host, tegra_host->streamid | + (tegra_host->streamid << 8), + SDHCI_TEGRA_CIF2AXI_CTRL_0); + } + ret = sdhci_resume_host(host); if (ret) goto disable_clk; diff --git a/kernel/kernel-5.10/drivers/net/phy/phy_device.c b/kernel/kernel-5.10/drivers/net/phy/phy_device.c index 920c15e620..76c5b5e47b 100644 --- a/kernel/kernel-5.10/drivers/net/phy/phy_device.c +++ b/kernel/kernel-5.10/drivers/net/phy/phy_device.c @@ -5,6 +5,7 @@ * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -944,7 +945,10 @@ static void phy_link_change(struct phy_device *phydev, bool up) netif_carrier_on(netdev); else netif_carrier_off(netdev); - phydev->adjust_link(netdev); + + if (phydev->adjust_link) + phydev->adjust_link(netdev); + if (phydev->mii_ts && phydev->mii_ts->link_state) phydev->mii_ts->link_state(phydev->mii_ts, phydev); } diff --git a/kernel/kernel-5.10/drivers/usb/typec/ucsi/ucsi_ccg.c b/kernel/kernel-5.10/drivers/usb/typec/ucsi/ucsi_ccg.c index d4e80c1639..e4526949e6 100644 --- a/kernel/kernel-5.10/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/kernel/kernel-5.10/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -2,7 +2,7 @@ /* * UCSI driver for Cypress CCGx Type-C controller * - * Copyright (C) 2017-2022 NVIDIA Corporation. All rights reserved. + * Copyright (C) 2017-2023 NVIDIA Corporation. All rights reserved. * Author: Ajay Gupta * * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c @@ -638,6 +638,11 @@ static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset, int con_index; int ret; + if (offset == UCSI_CONTROL && + UCSI_COMMAND(*(u64 *)val) == UCSI_GET_CAM_SUPPORTED && + uc->fw_build == CCG_FW_BUILD_NVIDIA_XAVIER) + return -EOPNOTSUPP; + mutex_lock(&uc->lock); pm_runtime_get_sync(uc->dev); set_bit(DEV_CMD_PENDING, &uc->flags); diff --git a/kernel/kernel-5.10/sound/soc/tegra/tegra186_asrc.c b/kernel/kernel-5.10/sound/soc/tegra/tegra186_asrc.c index 080f2c62f2..3a37d38e9e 100644 --- a/kernel/kernel-5.10/sound/soc/tegra/tegra186_asrc.c +++ b/kernel/kernel-5.10/sound/soc/tegra/tegra186_asrc.c @@ -2,7 +2,7 @@ // // tegra186_asrc.c - Tegra186 ASRC driver // -// Copyright (c) 2015-2021, NVIDIA CORPORATION. All rights reserved. +// Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved. 
#include #include @@ -717,60 +717,60 @@ ASRC_SOURCE_DECL(src_select6, 5); .invert = 0, .min = 0, .max = xmax} } static const struct snd_kcontrol_new tegra186_asrc_controls[] = { - SOC_SINGLE_EXT("Ratio1 Int", TEGRA186_ASRC_STREAM1_RATIO_INTEGER_PART, + SOC_SINGLE_EXT("Ratio1 Integer Part", TEGRA186_ASRC_STREAM1_RATIO_INTEGER_PART, 0, TEGRA186_ASRC_STREAM_RATIO_INTEGER_PART_MASK, 0, tegra186_asrc_get_ratio_int, tegra186_asrc_put_ratio_int), - SOC_SINGLE_EXT_FRAC("Ratio1 Frac", + SOC_SINGLE_EXT_FRAC("Ratio1 Fractional Part", TEGRA186_ASRC_STREAM1_RATIO_FRAC_PART, TEGRA186_ASRC_STREAM_RATIO_FRAC_PART_MASK, tegra186_asrc_get_ratio_frac, tegra186_asrc_put_ratio_frac), - SOC_SINGLE_EXT("Ratio2 Int", TEGRA186_ASRC_STREAM2_RATIO_INTEGER_PART, + SOC_SINGLE_EXT("Ratio2 Integer Part", TEGRA186_ASRC_STREAM2_RATIO_INTEGER_PART, 0, TEGRA186_ASRC_STREAM_RATIO_INTEGER_PART_MASK, 0, tegra186_asrc_get_ratio_int, tegra186_asrc_put_ratio_int), - SOC_SINGLE_EXT_FRAC("Ratio2 Frac", + SOC_SINGLE_EXT_FRAC("Ratio2 Fractional Part", TEGRA186_ASRC_STREAM2_RATIO_FRAC_PART, TEGRA186_ASRC_STREAM_RATIO_FRAC_PART_MASK, tegra186_asrc_get_ratio_frac, tegra186_asrc_put_ratio_frac), - SOC_SINGLE_EXT("Ratio3 Int", TEGRA186_ASRC_STREAM3_RATIO_INTEGER_PART, + SOC_SINGLE_EXT("Ratio3 Integer Part", TEGRA186_ASRC_STREAM3_RATIO_INTEGER_PART, 0, TEGRA186_ASRC_STREAM_RATIO_INTEGER_PART_MASK, 0, tegra186_asrc_get_ratio_int, tegra186_asrc_put_ratio_int), - SOC_SINGLE_EXT_FRAC("Ratio3 Frac", + SOC_SINGLE_EXT_FRAC("Ratio3 Fractional Part", TEGRA186_ASRC_STREAM3_RATIO_FRAC_PART, TEGRA186_ASRC_STREAM_RATIO_FRAC_PART_MASK, tegra186_asrc_get_ratio_frac, tegra186_asrc_put_ratio_frac), - SOC_SINGLE_EXT("Ratio4 Int", TEGRA186_ASRC_STREAM4_RATIO_INTEGER_PART, + SOC_SINGLE_EXT("Ratio4 Integer Part", TEGRA186_ASRC_STREAM4_RATIO_INTEGER_PART, 0, TEGRA186_ASRC_STREAM_RATIO_INTEGER_PART_MASK, 0, tegra186_asrc_get_ratio_int, tegra186_asrc_put_ratio_int), - SOC_SINGLE_EXT_FRAC("Ratio4 Frac", + SOC_SINGLE_EXT_FRAC("Ratio4 
Fractional Part", TEGRA186_ASRC_STREAM4_RATIO_FRAC_PART, TEGRA186_ASRC_STREAM_RATIO_FRAC_PART_MASK, tegra186_asrc_get_ratio_frac, tegra186_asrc_put_ratio_frac), - SOC_SINGLE_EXT("Ratio5 Int", TEGRA186_ASRC_STREAM5_RATIO_INTEGER_PART, + SOC_SINGLE_EXT("Ratio5 Integer Part", TEGRA186_ASRC_STREAM5_RATIO_INTEGER_PART, 0, TEGRA186_ASRC_STREAM_RATIO_INTEGER_PART_MASK, 0, tegra186_asrc_get_ratio_int, tegra186_asrc_put_ratio_int), - SOC_SINGLE_EXT_FRAC("Ratio5 Frac", + SOC_SINGLE_EXT_FRAC("Ratio5 Fractional Part", TEGRA186_ASRC_STREAM5_RATIO_FRAC_PART, TEGRA186_ASRC_STREAM_RATIO_FRAC_PART_MASK, tegra186_asrc_get_ratio_frac, tegra186_asrc_put_ratio_frac), - SOC_SINGLE_EXT("Ratio6 Int", TEGRA186_ASRC_STREAM6_RATIO_INTEGER_PART, + SOC_SINGLE_EXT("Ratio6 Integer Part", TEGRA186_ASRC_STREAM6_RATIO_INTEGER_PART, 0, TEGRA186_ASRC_STREAM_RATIO_INTEGER_PART_MASK, 0, tegra186_asrc_get_ratio_int, tegra186_asrc_put_ratio_int), - SOC_SINGLE_EXT_FRAC("Ratio6 Frac", + SOC_SINGLE_EXT_FRAC("Ratio6 Fractional Part", TEGRA186_ASRC_STREAM6_RATIO_FRAC_PART, TEGRA186_ASRC_STREAM_RATIO_FRAC_PART_MASK, tegra186_asrc_get_ratio_frac, tegra186_asrc_put_ratio_frac), - SOC_ENUM_EXT("Ratio1 SRC", src_select1, + SOC_ENUM_EXT("Ratio1 Source", src_select1, tegra186_asrc_get_ratio_source, tegra186_asrc_put_ratio_source), - SOC_ENUM_EXT("Ratio2 SRC", src_select2, + SOC_ENUM_EXT("Ratio2 Source", src_select2, tegra186_asrc_get_ratio_source, tegra186_asrc_put_ratio_source), - SOC_ENUM_EXT("Ratio3 SRC", src_select3, + SOC_ENUM_EXT("Ratio3 Source", src_select3, tegra186_asrc_get_ratio_source, tegra186_asrc_put_ratio_source), - SOC_ENUM_EXT("Ratio4 SRC", src_select4, + SOC_ENUM_EXT("Ratio4 Source", src_select4, tegra186_asrc_get_ratio_source, tegra186_asrc_put_ratio_source), - SOC_ENUM_EXT("Ratio5 SRC", src_select5, + SOC_ENUM_EXT("Ratio5 Source", src_select5, tegra186_asrc_get_ratio_source, tegra186_asrc_put_ratio_source), - SOC_ENUM_EXT("Ratio6 SRC", src_select6, + SOC_ENUM_EXT("Ratio6 Source", src_select6, 
tegra186_asrc_get_ratio_source, tegra186_asrc_put_ratio_source), SOC_SINGLE_EXT("Stream1 Enable", diff --git a/kernel/kernel-5.10/sound/soc/tegra/tegra210_ahub.c b/kernel/kernel-5.10/sound/soc/tegra/tegra210_ahub.c index 8edb750f28..e210ff8255 100644 --- a/kernel/kernel-5.10/sound/soc/tegra/tegra210_ahub.c +++ b/kernel/kernel-5.10/sound/soc/tegra/tegra210_ahub.c @@ -2,7 +2,7 @@ // // tegra210_ahub.c - Tegra210 AHUB driver // -// Copyright (c) 2020-2021 NVIDIA CORPORATION. All rights reserved. +// Copyright (c) 2020-2023 NVIDIA CORPORATION. All rights reserved. #include #include @@ -139,6 +139,16 @@ void tegra210_ahub_read_ram(struct regmap *regmap, unsigned int reg_ctrl, } EXPORT_SYMBOL_GPL(tegra210_ahub_read_ram); +/* + * TODO As per downstream kernel code there will be routing issue + * if DAI names are updated for SFC, MVC and OPE input and + * output. Due to that added those modules output DAIs just to keep + * similar to upstream kernel. Single DAI is used for input and + * output. + * + * Once the routing changes are done for above mentioned modules + * as per upstream, suffix the common dai name with RX. 
+ */ static struct snd_soc_dai_driver tegra210_ahub_dais[] = { DAI(ADMAIF1), DAI(ADMAIF2), @@ -155,58 +165,71 @@ static struct snd_soc_dai_driver tegra210_ahub_dais[] = { DAI(I2S3), DAI(I2S4), DAI(I2S5), + DAI(DMIC1), + DAI(DMIC2), + DAI(DMIC3), DAI(SFC1), + DAI(SFC1 TX), DAI(SFC2), + DAI(SFC2 TX), DAI(SFC3), + DAI(SFC3 TX), DAI(SFC4), - DAI(MIXER1-1), - DAI(MIXER1-2), - DAI(MIXER1-3), - DAI(MIXER1-4), - DAI(MIXER1-5), - DAI(MIXER1-6), - DAI(MIXER1-7), - DAI(MIXER1-8), - DAI(MIXER1-9), - DAI(MIXER1-10), + DAI(SFC4 TX), + DAI(MVC1), + DAI(MVC1 TX), + DAI(MVC2), + DAI(MVC2 TX), + DAI(AMX1 RX1), + DAI(AMX1 RX2), + DAI(AMX1 RX3), + DAI(AMX1 RX4), + DAI(AMX1), + DAI(AMX2 RX1), + DAI(AMX2 RX2), + DAI(AMX2 RX3), + DAI(AMX2 RX4), + DAI(AMX2), + DAI(ADX1), + DAI(ADX1 TX1), + DAI(ADX1 TX2), + DAI(ADX1 TX3), + DAI(ADX1 TX4), + DAI(ADX2), + DAI(ADX2 TX1), + DAI(ADX2 TX2), + DAI(ADX2 TX3), + DAI(ADX2 TX4), + DAI(MIXER1 RX1), + DAI(MIXER1 RX2), + DAI(MIXER1 RX3), + DAI(MIXER1 RX4), + DAI(MIXER1 RX5), + DAI(MIXER1 RX6), + DAI(MIXER1 RX7), + DAI(MIXER1 RX8), + DAI(MIXER1 RX9), + DAI(MIXER1 RX10), + DAI(MIXER1 TX1), + DAI(MIXER1 TX2), + DAI(MIXER1 TX3), + DAI(MIXER1 TX4), + DAI(MIXER1 TX5), + DAI(OPE1), + DAI(OPE1 TX), + DAI(OPE2), + DAI(OPE2 TX), DAI(AFC1), DAI(AFC2), DAI(AFC3), DAI(AFC4), DAI(AFC5), DAI(AFC6), - DAI(OPE1), DAI(SPKPROT1), - DAI(MVC1), - DAI(MVC2), DAI(IQC1-1), DAI(IQC1-2), DAI(IQC2-1), DAI(IQC2-2), - DAI(DMIC1), - DAI(DMIC2), - DAI(DMIC3), - DAI(AMX1), - DAI(AMX1-1), - DAI(AMX1-2), - DAI(AMX1-3), - DAI(AMX1-4), - DAI(AMX2), - DAI(AMX2-1), - DAI(AMX2-2), - DAI(AMX2-3), - DAI(AMX2-4), - DAI(ADX1-1), - DAI(ADX1-2), - DAI(ADX1-3), - DAI(ADX1-4), - DAI(ADX1), - DAI(ADX2-1), - DAI(ADX2-2), - DAI(ADX2-3), - DAI(ADX2-4), - DAI(ADX2), - DAI(OPE2), }; static struct snd_soc_dai_driver tegra186_ahub_dais[] = { @@ -220,104 +243,122 @@ static struct snd_soc_dai_driver tegra186_ahub_dais[] = { DAI(ADMAIF8), DAI(ADMAIF9), DAI(ADMAIF10), + DAI(ADMAIF11), + DAI(ADMAIF12), + 
DAI(ADMAIF13), + DAI(ADMAIF14), + DAI(ADMAIF15), + DAI(ADMAIF16), + DAI(ADMAIF17), + DAI(ADMAIF18), + DAI(ADMAIF19), + DAI(ADMAIF20), DAI(I2S1), DAI(I2S2), DAI(I2S3), DAI(I2S4), DAI(I2S5), + DAI(I2S6), + DAI(DMIC1), + DAI(DMIC2), + DAI(DMIC3), + DAI(DMIC4), + DAI(DSPK1), + DAI(DSPK2), DAI(SFC1), + DAI(SFC1 TX), DAI(SFC2), + DAI(SFC2 TX), DAI(SFC3), + DAI(SFC3 TX), DAI(SFC4), - DAI(MIXER1-1), - DAI(MIXER1-2), - DAI(MIXER1-3), - DAI(MIXER1-4), - DAI(MIXER1-5), - DAI(MIXER1-6), - DAI(MIXER1-7), - DAI(MIXER1-8), - DAI(MIXER1-9), - DAI(MIXER1-10), + DAI(SFC4 TX), + DAI(MVC1), + DAI(MVC1 TX), + DAI(MVC2), + DAI(MVC2 TX), + DAI(AMX1 RX1), + DAI(AMX1 RX2), + DAI(AMX1 RX3), + DAI(AMX1 RX4), + DAI(AMX1), + DAI(AMX2 RX1), + DAI(AMX2 RX2), + DAI(AMX2 RX3), + DAI(AMX2 RX4), + DAI(AMX2), + DAI(AMX3 RX1), + DAI(AMX3 RX2), + DAI(AMX3 RX3), + DAI(AMX3 RX4), + DAI(AMX3), + DAI(AMX4 RX1), + DAI(AMX4 RX2), + DAI(AMX4 RX3), + DAI(AMX4 RX4), + DAI(AMX4), + DAI(ADX1), + DAI(ADX1 TX1), + DAI(ADX1 TX2), + DAI(ADX1 TX3), + DAI(ADX1 TX4), + DAI(ADX2), + DAI(ADX2 TX1), + DAI(ADX2 TX2), + DAI(ADX2 TX3), + DAI(ADX2 TX4), + DAI(ADX3), + DAI(ADX3 TX1), + DAI(ADX3 TX2), + DAI(ADX3 TX3), + DAI(ADX3 TX4), + DAI(ADX4), + DAI(ADX4 TX1), + DAI(ADX4 TX2), + DAI(ADX4 TX3), + DAI(ADX4 TX4), + DAI(MIXER1 RX1), + DAI(MIXER1 RX2), + DAI(MIXER1 RX3), + DAI(MIXER1 RX4), + DAI(MIXER1 RX5), + DAI(MIXER1 RX6), + DAI(MIXER1 RX7), + DAI(MIXER1 RX8), + DAI(MIXER1 RX9), + DAI(MIXER1 RX10), + DAI(MIXER1 TX1), + DAI(MIXER1 TX2), + DAI(MIXER1 TX3), + DAI(MIXER1 TX4), + DAI(MIXER1 TX5), + DAI(ASRC1 RX1), + DAI(ASRC1 TX1), + DAI(ASRC1 RX2), + DAI(ASRC1 TX2), + DAI(ASRC1 RX3), + DAI(ASRC1 TX3), + DAI(ASRC1 RX4), + DAI(ASRC1 TX4), + DAI(ASRC1 RX5), + DAI(ASRC1 TX5), + DAI(ASRC1 RX6), + DAI(ASRC1 TX6), + DAI(ASRC1 RX7), + DAI(OPE1), + DAI(OPE1 TX), DAI(AFC1), DAI(AFC2), DAI(AFC3), DAI(AFC4), DAI(AFC5), DAI(AFC6), - DAI(OPE1), DAI(SPKPROT1), - DAI(MVC1), - DAI(MVC2), DAI(IQC1-1), DAI(IQC1-2), DAI(IQC2-1), DAI(IQC2-2), - 
DAI(DMIC1), - DAI(DMIC2), - DAI(DMIC3), - DAI(AMX1), - DAI(AMX1-1), - DAI(AMX1-2), - DAI(AMX1-3), - DAI(AMX1-4), - DAI(AMX2), - DAI(AMX2-1), - DAI(AMX2-2), - DAI(AMX2-3), - DAI(AMX2-4), - DAI(ADX1-1), - DAI(ADX1-2), - DAI(ADX1-3), - DAI(ADX1-4), - DAI(ADX1), - DAI(ADX2-1), - DAI(ADX2-2), - DAI(ADX2-3), - DAI(ADX2-4), - DAI(ADX2), - DAI(ADMAIF11), - DAI(ADMAIF12), - DAI(ADMAIF13), - DAI(ADMAIF14), - DAI(ADMAIF15), - DAI(ADMAIF16), - DAI(ADMAIF17), - DAI(ADMAIF18), - DAI(ADMAIF19), - DAI(ADMAIF20), - DAI(I2S6), - DAI(AMX3), - DAI(AMX3-1), - DAI(AMX3-2), - DAI(AMX3-3), - DAI(AMX3-4), - DAI(AMX4), - DAI(AMX4-1), - DAI(AMX4-2), - DAI(AMX4-3), - DAI(AMX4-4), - DAI(ADX3-1), - DAI(ADX3-2), - DAI(ADX3-3), - DAI(ADX3-4), - DAI(ADX3), - DAI(ADX4-1), - DAI(ADX4-2), - DAI(ADX4-3), - DAI(ADX4-4), - DAI(ADX4), - DAI(DMIC4), - DAI(ASRC1-1), - DAI(ASRC1-2), - DAI(ASRC1-3), - DAI(ASRC1-4), - DAI(ASRC1-5), - DAI(ASRC1-6), - DAI(ASRC1-7), DAI(ARAD1), - DAI(DSPK1), - DAI(DSPK2), }; static const char * const tegra210_ahub_mux_texts[] = { @@ -342,11 +383,11 @@ static const char * const tegra210_ahub_mux_texts[] = { "SFC3", "SFC4", /* index 0..19 above are inputs of PART0 Mux */ - "MIXER1-1", - "MIXER1-2", - "MIXER1-3", - "MIXER1-4", - "MIXER1-5", + "MIXER1 TX1", + "MIXER1 TX2", + "MIXER1 TX3", + "MIXER1 TX4", + "MIXER1 TX5", "AMX1", "AMX2", "AFC1", @@ -368,14 +409,14 @@ static const char * const tegra210_ahub_mux_texts[] = { "DMIC1", "DMIC2", "DMIC3", - "ADX1-1", - "ADX1-2", - "ADX1-3", - "ADX1-4", - "ADX2-1", - "ADX2-2", - "ADX2-3", - "ADX2-4", + "ADX1 TX1", + "ADX1 TX2", + "ADX1 TX3", + "ADX1 TX4", + "ADX2 TX1", + "ADX2 TX2", + "ADX2 TX3", + "ADX2 TX4", /* index 35..53 above are inputs of PART2 Mux */ }; @@ -408,11 +449,11 @@ static const char * const tegra186_ahub_mux_texts[] = { "SFC3", "SFC4", /* index 0..19 above are inputs of PART0 Mux */ - "MIXER1-1", - "MIXER1-2", - "MIXER1-3", - "MIXER1-4", - "MIXER1-5", + "MIXER1 TX1", + "MIXER1 TX2", + "MIXER1 TX3", + "MIXER1 TX4", + "MIXER1 
TX5", "AMX1", "AMX2", "AMX3", @@ -437,33 +478,33 @@ static const char * const tegra186_ahub_mux_texts[] = { "DMIC2", "DMIC3", "DMIC4", - "ADX1-1", - "ADX1-2", - "ADX1-3", - "ADX1-4", - "ADX2-1", - "ADX2-2", - "ADX2-3", - "ADX2-4", + "ADX1 TX1", + "ADX1 TX2", + "ADX1 TX3", + "ADX1 TX4", + "ADX2 TX1", + "ADX2 TX2", + "ADX2 TX3", + "ADX2 TX4", /* index 35..53 above are inputs of PART2 Mux */ - "ADX3-1", - "ADX3-2", - "ADX3-3", - "ADX3-4", - "ADX4-1", - "ADX4-2", - "ADX4-3", - "ADX4-4", + "ADX3 TX1", + "ADX3 TX2", + "ADX3 TX3", + "ADX3 TX4", + "ADX4 TX1", + "ADX4 TX2", + "ADX4 TX3", + "ADX4 TX4", "ADMAIF17", "ADMAIF18", "ADMAIF19", "ADMAIF20", - "ASRC1-1", - "ASRC1-2", - "ASRC1-3", - "ASRC1-4", - "ASRC1-5", - "ASRC1-6", + "ASRC1 TX1", + "ASRC1 TX2", + "ASRC1 TX3", + "ASRC1 TX4", + "ASRC1 TX5", + "ASRC1 TX6", /* index 54..71 above are inputs of PART3 Mux */ }; @@ -814,16 +855,21 @@ static const struct snd_soc_dapm_widget tegra210_ahub_widgets[] = { WIDGETS("SFC2", t210_sfc2_tx), WIDGETS("SFC3", t210_sfc3_tx), WIDGETS("SFC4", t210_sfc4_tx), - WIDGETS("MIXER1-1", t210_mixer11_tx), - WIDGETS("MIXER1-2", t210_mixer12_tx), - WIDGETS("MIXER1-3", t210_mixer13_tx), - WIDGETS("MIXER1-4", t210_mixer14_tx), - WIDGETS("MIXER1-5", t210_mixer15_tx), - WIDGETS("MIXER1-6", t210_mixer16_tx), - WIDGETS("MIXER1-7", t210_mixer17_tx), - WIDGETS("MIXER1-8", t210_mixer18_tx), - WIDGETS("MIXER1-9", t210_mixer19_tx), - WIDGETS("MIXER1-10", t210_mixer110_tx), + WIDGETS("MIXER1 RX1", t210_mixer11_tx), + WIDGETS("MIXER1 RX2", t210_mixer12_tx), + WIDGETS("MIXER1 RX3", t210_mixer13_tx), + WIDGETS("MIXER1 RX4", t210_mixer14_tx), + WIDGETS("MIXER1 RX5", t210_mixer15_tx), + WIDGETS("MIXER1 RX6", t210_mixer16_tx), + WIDGETS("MIXER1 RX7", t210_mixer17_tx), + WIDGETS("MIXER1 RX8", t210_mixer18_tx), + WIDGETS("MIXER1 RX9", t210_mixer19_tx), + WIDGETS("MIXER1 RX10", t210_mixer110_tx), + TX_WIDGETS("MIXER1 TX1"), + TX_WIDGETS("MIXER1 TX2"), + TX_WIDGETS("MIXER1 TX3"), + TX_WIDGETS("MIXER1 TX4"), + 
TX_WIDGETS("MIXER1 TX5"), WIDGETS("AFC1", t210_afc1_tx), WIDGETS("AFC2", t210_afc2_tx), WIDGETS("AFC3", t210_afc3_tx), @@ -835,14 +881,14 @@ static const struct snd_soc_dapm_widget tegra210_ahub_widgets[] = { WIDGETS("SPKPROT1", t210_spkprot_tx), WIDGETS("MVC1", t210_mvc1_tx), WIDGETS("MVC2", t210_mvc2_tx), - WIDGETS("AMX1-1", t210_amx11_tx), - WIDGETS("AMX1-2", t210_amx12_tx), - WIDGETS("AMX1-3", t210_amx13_tx), - WIDGETS("AMX1-4", t210_amx14_tx), - WIDGETS("AMX2-1", t210_amx21_tx), - WIDGETS("AMX2-2", t210_amx22_tx), - WIDGETS("AMX2-3", t210_amx23_tx), - WIDGETS("AMX2-4", t210_amx24_tx), + WIDGETS("AMX1 RX1", t210_amx11_tx), + WIDGETS("AMX1 RX2", t210_amx12_tx), + WIDGETS("AMX1 RX3", t210_amx13_tx), + WIDGETS("AMX1 RX4", t210_amx14_tx), + WIDGETS("AMX2 RX1", t210_amx21_tx), + WIDGETS("AMX2 RX2", t210_amx22_tx), + WIDGETS("AMX2 RX3", t210_amx23_tx), + WIDGETS("AMX2 RX4", t210_amx24_tx), WIDGETS("ADX1", t210_adx1_tx), WIDGETS("ADX2", t210_adx2_tx), TX_WIDGETS("IQC1-1"), @@ -853,15 +899,15 @@ static const struct snd_soc_dapm_widget tegra210_ahub_widgets[] = { TX_WIDGETS("DMIC2"), TX_WIDGETS("DMIC3"), TX_WIDGETS("AMX1"), - TX_WIDGETS("ADX1-1"), - TX_WIDGETS("ADX1-2"), - TX_WIDGETS("ADX1-3"), - TX_WIDGETS("ADX1-4"), + TX_WIDGETS("ADX1 TX1"), + TX_WIDGETS("ADX1 TX2"), + TX_WIDGETS("ADX1 TX3"), + TX_WIDGETS("ADX1 TX4"), TX_WIDGETS("AMX2"), - TX_WIDGETS("ADX2-1"), - TX_WIDGETS("ADX2-2"), - TX_WIDGETS("ADX2-3"), - TX_WIDGETS("ADX2-4"), + TX_WIDGETS("ADX2 TX1"), + TX_WIDGETS("ADX2 TX2"), + TX_WIDGETS("ADX2 TX3"), + TX_WIDGETS("ADX2 TX4"), }; static const struct snd_soc_dapm_widget tegra186_ahub_widgets[] = { @@ -884,16 +930,21 @@ static const struct snd_soc_dapm_widget tegra186_ahub_widgets[] = { WIDGETS("SFC2", t186_sfc2_tx), WIDGETS("SFC3", t186_sfc3_tx), WIDGETS("SFC4", t186_sfc4_tx), - WIDGETS("MIXER1-1", t186_mixer11_tx), - WIDGETS("MIXER1-2", t186_mixer12_tx), - WIDGETS("MIXER1-3", t186_mixer13_tx), - WIDGETS("MIXER1-4", t186_mixer14_tx), - WIDGETS("MIXER1-5", 
t186_mixer15_tx), - WIDGETS("MIXER1-6", t186_mixer16_tx), - WIDGETS("MIXER1-7", t186_mixer17_tx), - WIDGETS("MIXER1-8", t186_mixer18_tx), - WIDGETS("MIXER1-9", t186_mixer19_tx), - WIDGETS("MIXER1-10", t186_mixer110_tx), + WIDGETS("MIXER1 RX1", t186_mixer11_tx), + WIDGETS("MIXER1 RX2", t186_mixer12_tx), + WIDGETS("MIXER1 RX3", t186_mixer13_tx), + WIDGETS("MIXER1 RX4", t186_mixer14_tx), + WIDGETS("MIXER1 RX5", t186_mixer15_tx), + WIDGETS("MIXER1 RX6", t186_mixer16_tx), + WIDGETS("MIXER1 RX7", t186_mixer17_tx), + WIDGETS("MIXER1 RX8", t186_mixer18_tx), + WIDGETS("MIXER1 RX9", t186_mixer19_tx), + WIDGETS("MIXER1 RX10", t186_mixer110_tx), + TX_WIDGETS("MIXER1 TX1"), + TX_WIDGETS("MIXER1 TX2"), + TX_WIDGETS("MIXER1 TX3"), + TX_WIDGETS("MIXER1 TX4"), + TX_WIDGETS("MIXER1 TX5"), WIDGETS("AFC1", t186_afc1_tx), WIDGETS("AFC2", t186_afc2_tx), WIDGETS("AFC3", t186_afc3_tx), @@ -904,14 +955,14 @@ static const struct snd_soc_dapm_widget tegra186_ahub_widgets[] = { WIDGETS("SPKPROT1", t186_spkprot_tx), WIDGETS("MVC1", t186_mvc1_tx), WIDGETS("MVC2", t186_mvc2_tx), - WIDGETS("AMX1-1", t186_amx11_tx), - WIDGETS("AMX1-2", t186_amx12_tx), - WIDGETS("AMX1-3", t186_amx13_tx), - WIDGETS("AMX1-4", t186_amx14_tx), - WIDGETS("AMX2-1", t186_amx21_tx), - WIDGETS("AMX2-2", t186_amx22_tx), - WIDGETS("AMX2-3", t186_amx23_tx), - WIDGETS("AMX2-4", t186_amx24_tx), + WIDGETS("AMX1 RX1", t186_amx11_tx), + WIDGETS("AMX1 RX2", t186_amx12_tx), + WIDGETS("AMX1 RX3", t186_amx13_tx), + WIDGETS("AMX1 RX4", t186_amx14_tx), + WIDGETS("AMX2 RX1", t186_amx21_tx), + WIDGETS("AMX2 RX2", t186_amx22_tx), + WIDGETS("AMX2 RX3", t186_amx23_tx), + WIDGETS("AMX2 RX4", t186_amx24_tx), WIDGETS("ADX1", t186_adx1_tx), WIDGETS("ADX2", t186_adx2_tx), TX_WIDGETS("IQC1-1"), @@ -922,15 +973,15 @@ static const struct snd_soc_dapm_widget tegra186_ahub_widgets[] = { TX_WIDGETS("DMIC2"), TX_WIDGETS("DMIC3"), TX_WIDGETS("AMX1"), - TX_WIDGETS("ADX1-1"), - TX_WIDGETS("ADX1-2"), - TX_WIDGETS("ADX1-3"), - TX_WIDGETS("ADX1-4"), + 
TX_WIDGETS("ADX1 TX1"), + TX_WIDGETS("ADX1 TX2"), + TX_WIDGETS("ADX1 TX3"), + TX_WIDGETS("ADX1 TX4"), TX_WIDGETS("AMX2"), - TX_WIDGETS("ADX2-1"), - TX_WIDGETS("ADX2-2"), - TX_WIDGETS("ADX2-3"), - TX_WIDGETS("ADX2-4"), + TX_WIDGETS("ADX2 TX1"), + TX_WIDGETS("ADX2 TX2"), + TX_WIDGETS("ADX2 TX3"), + TX_WIDGETS("ADX2 TX4"), WIDGETS("ADMAIF11", t186_admaif11_tx), WIDGETS("ADMAIF12", t186_admaif12_tx), WIDGETS("ADMAIF13", t186_admaif13_tx), @@ -942,35 +993,41 @@ static const struct snd_soc_dapm_widget tegra186_ahub_widgets[] = { WIDGETS("ADMAIF19", t186_admaif19_tx), WIDGETS("ADMAIF20", t186_admaif20_tx), WIDGETS("I2S6", t186_i2s6_tx), - WIDGETS("AMX3-1", t186_amx31_tx), - WIDGETS("AMX3-2", t186_amx32_tx), - WIDGETS("AMX3-3", t186_amx33_tx), - WIDGETS("AMX3-4", t186_amx34_tx), - WIDGETS("AMX4-1", t186_amx41_tx), - WIDGETS("AMX4-2", t186_amx42_tx), - WIDGETS("AMX4-3", t186_amx43_tx), - WIDGETS("AMX4-4", t186_amx44_tx), + WIDGETS("AMX3 RX1", t186_amx31_tx), + WIDGETS("AMX3 RX2", t186_amx32_tx), + WIDGETS("AMX3 RX3", t186_amx33_tx), + WIDGETS("AMX3 RX4", t186_amx34_tx), + WIDGETS("AMX4 RX1", t186_amx41_tx), + WIDGETS("AMX4 RX2", t186_amx42_tx), + WIDGETS("AMX4 RX3", t186_amx43_tx), + WIDGETS("AMX4 RX4", t186_amx44_tx), WIDGETS("ADX3", t186_adx3_tx), WIDGETS("ADX4", t186_adx4_tx), - WIDGETS("ASRC1-1", t186_asrc11_tx), - WIDGETS("ASRC1-2", t186_asrc12_tx), - WIDGETS("ASRC1-3", t186_asrc13_tx), - WIDGETS("ASRC1-4", t186_asrc14_tx), - WIDGETS("ASRC1-5", t186_asrc15_tx), - WIDGETS("ASRC1-6", t186_asrc16_tx), - WIDGETS("ASRC1-7", t186_asrc17_tx), + WIDGETS("ASRC1 RX1", t186_asrc11_tx), + WIDGETS("ASRC1 RX2", t186_asrc12_tx), + WIDGETS("ASRC1 RX3", t186_asrc13_tx), + WIDGETS("ASRC1 RX4", t186_asrc14_tx), + WIDGETS("ASRC1 RX5", t186_asrc15_tx), + WIDGETS("ASRC1 RX6", t186_asrc16_tx), + WIDGETS("ASRC1 RX7", t186_asrc17_tx), + TX_WIDGETS("ASRC1 TX1"), + TX_WIDGETS("ASRC1 TX2"), + TX_WIDGETS("ASRC1 TX3"), + TX_WIDGETS("ASRC1 TX4"), + TX_WIDGETS("ASRC1 TX5"), + TX_WIDGETS("ASRC1 TX6"), 
WIDGETS("DSPK1", t186_dspk1_tx), WIDGETS("DSPK2", t186_dspk2_tx), TX_WIDGETS("AMX3"), - TX_WIDGETS("ADX3-1"), - TX_WIDGETS("ADX3-2"), - TX_WIDGETS("ADX3-3"), - TX_WIDGETS("ADX3-4"), + TX_WIDGETS("ADX3 TX1"), + TX_WIDGETS("ADX3 TX2"), + TX_WIDGETS("ADX3 TX3"), + TX_WIDGETS("ADX3 TX4"), TX_WIDGETS("AMX4"), - TX_WIDGETS("ADX4-1"), - TX_WIDGETS("ADX4-2"), - TX_WIDGETS("ADX4-3"), - TX_WIDGETS("ADX4-4"), + TX_WIDGETS("ADX4 TX1"), + TX_WIDGETS("ADX4 TX2"), + TX_WIDGETS("ADX4 TX3"), + TX_WIDGETS("ADX4 TX4"), TX_WIDGETS("DMIC4"), TX_WIDGETS("ARAD1"), }; @@ -996,16 +1053,21 @@ static const struct snd_soc_dapm_widget tegra234_ahub_widgets[] = { WIDGETS("SFC2", t186_sfc2_tx), WIDGETS("SFC3", t186_sfc3_tx), WIDGETS("SFC4", t186_sfc4_tx), - WIDGETS("MIXER1-1", t186_mixer11_tx), - WIDGETS("MIXER1-2", t186_mixer12_tx), - WIDGETS("MIXER1-3", t186_mixer13_tx), - WIDGETS("MIXER1-4", t186_mixer14_tx), - WIDGETS("MIXER1-5", t186_mixer15_tx), - WIDGETS("MIXER1-6", t186_mixer16_tx), - WIDGETS("MIXER1-7", t186_mixer17_tx), - WIDGETS("MIXER1-8", t186_mixer18_tx), - WIDGETS("MIXER1-9", t186_mixer19_tx), - WIDGETS("MIXER1-10", t186_mixer110_tx), + WIDGETS("MIXER1 RX1", t186_mixer11_tx), + WIDGETS("MIXER1 RX2", t186_mixer12_tx), + WIDGETS("MIXER1 RX3", t186_mixer13_tx), + WIDGETS("MIXER1 RX4", t186_mixer14_tx), + WIDGETS("MIXER1 RX5", t186_mixer15_tx), + WIDGETS("MIXER1 RX6", t186_mixer16_tx), + WIDGETS("MIXER1 RX7", t186_mixer17_tx), + WIDGETS("MIXER1 RX8", t186_mixer18_tx), + WIDGETS("MIXER1 RX9", t186_mixer19_tx), + WIDGETS("MIXER1 RX10", t186_mixer110_tx), + TX_WIDGETS("MIXER1 TX1"), + TX_WIDGETS("MIXER1 TX2"), + TX_WIDGETS("MIXER1 TX3"), + TX_WIDGETS("MIXER1 TX4"), + TX_WIDGETS("MIXER1 TX5"), WIDGETS("AFC1", t234_afc1_tx), WIDGETS("AFC2", t234_afc2_tx), WIDGETS("AFC3", t234_afc3_tx), @@ -1016,14 +1078,14 @@ static const struct snd_soc_dapm_widget tegra234_ahub_widgets[] = { WIDGETS("SPKPROT1", t234_spkprot_tx), WIDGETS("MVC1", t234_mvc1_tx), WIDGETS("MVC2", t234_mvc2_tx), - 
WIDGETS("AMX1-1", t234_amx11_tx), - WIDGETS("AMX1-2", t234_amx12_tx), - WIDGETS("AMX1-3", t234_amx13_tx), - WIDGETS("AMX1-4", t234_amx14_tx), - WIDGETS("AMX2-1", t234_amx21_tx), - WIDGETS("AMX2-2", t234_amx22_tx), - WIDGETS("AMX2-3", t234_amx23_tx), - WIDGETS("AMX2-4", t234_amx24_tx), + WIDGETS("AMX1 RX1", t234_amx11_tx), + WIDGETS("AMX1 RX2", t234_amx12_tx), + WIDGETS("AMX1 RX3", t234_amx13_tx), + WIDGETS("AMX1 RX4", t234_amx14_tx), + WIDGETS("AMX2 RX1", t234_amx21_tx), + WIDGETS("AMX2 RX2", t234_amx22_tx), + WIDGETS("AMX2 RX3", t234_amx23_tx), + WIDGETS("AMX2 RX4", t234_amx24_tx), WIDGETS("ADX1", t234_adx1_tx), WIDGETS("ADX2", t234_adx2_tx), TX_WIDGETS("IQC1-1"), @@ -1034,15 +1096,15 @@ static const struct snd_soc_dapm_widget tegra234_ahub_widgets[] = { TX_WIDGETS("DMIC2"), TX_WIDGETS("DMIC3"), TX_WIDGETS("AMX1"), - TX_WIDGETS("ADX1-1"), - TX_WIDGETS("ADX1-2"), - TX_WIDGETS("ADX1-3"), - TX_WIDGETS("ADX1-4"), + TX_WIDGETS("ADX1 TX1"), + TX_WIDGETS("ADX1 TX2"), + TX_WIDGETS("ADX1 TX3"), + TX_WIDGETS("ADX1 TX4"), TX_WIDGETS("AMX2"), - TX_WIDGETS("ADX2-1"), - TX_WIDGETS("ADX2-2"), - TX_WIDGETS("ADX2-3"), - TX_WIDGETS("ADX2-4"), + TX_WIDGETS("ADX2 TX1"), + TX_WIDGETS("ADX2 TX2"), + TX_WIDGETS("ADX2 TX3"), + TX_WIDGETS("ADX2 TX4"), WIDGETS("ADMAIF11", t186_admaif11_tx), WIDGETS("ADMAIF12", t186_admaif12_tx), WIDGETS("ADMAIF13", t186_admaif13_tx), @@ -1054,35 +1116,41 @@ static const struct snd_soc_dapm_widget tegra234_ahub_widgets[] = { WIDGETS("ADMAIF19", t234_admaif19_tx), WIDGETS("ADMAIF20", t234_admaif20_tx), WIDGETS("I2S6", t186_i2s6_tx), - WIDGETS("AMX3-1", t234_amx31_tx), - WIDGETS("AMX3-2", t234_amx32_tx), - WIDGETS("AMX3-3", t234_amx33_tx), - WIDGETS("AMX3-4", t234_amx34_tx), - WIDGETS("AMX4-1", t234_amx41_tx), - WIDGETS("AMX4-2", t234_amx42_tx), - WIDGETS("AMX4-3", t234_amx43_tx), - WIDGETS("AMX4-4", t234_amx44_tx), + WIDGETS("AMX3 RX1", t234_amx31_tx), + WIDGETS("AMX3 RX2", t234_amx32_tx), + WIDGETS("AMX3 RX3", t234_amx33_tx), + WIDGETS("AMX3 RX4", 
t234_amx34_tx), + WIDGETS("AMX4 RX1", t234_amx41_tx), + WIDGETS("AMX4 RX2", t234_amx42_tx), + WIDGETS("AMX4 RX3", t234_amx43_tx), + WIDGETS("AMX4 RX4", t234_amx44_tx), WIDGETS("ADX3", t234_adx3_tx), WIDGETS("ADX4", t234_adx4_tx), - WIDGETS("ASRC1-1", t234_asrc11_tx), - WIDGETS("ASRC1-2", t234_asrc12_tx), - WIDGETS("ASRC1-3", t234_asrc13_tx), - WIDGETS("ASRC1-4", t234_asrc14_tx), - WIDGETS("ASRC1-5", t234_asrc15_tx), - WIDGETS("ASRC1-6", t234_asrc16_tx), - WIDGETS("ASRC1-7", t234_asrc17_tx), + WIDGETS("ASRC1 RX1", t234_asrc11_tx), + WIDGETS("ASRC1 RX2", t234_asrc12_tx), + WIDGETS("ASRC1 RX3", t234_asrc13_tx), + WIDGETS("ASRC1 RX4", t234_asrc14_tx), + WIDGETS("ASRC1 RX5", t234_asrc15_tx), + WIDGETS("ASRC1 RX6", t234_asrc16_tx), + WIDGETS("ASRC1 RX7", t234_asrc17_tx), + TX_WIDGETS("ASRC1 TX1"), + TX_WIDGETS("ASRC1 TX2"), + TX_WIDGETS("ASRC1 TX3"), + TX_WIDGETS("ASRC1 TX4"), + TX_WIDGETS("ASRC1 TX5"), + TX_WIDGETS("ASRC1 TX6"), WIDGETS("DSPK1", t186_dspk1_tx), WIDGETS("DSPK2", t186_dspk2_tx), TX_WIDGETS("AMX3"), - TX_WIDGETS("ADX3-1"), - TX_WIDGETS("ADX3-2"), - TX_WIDGETS("ADX3-3"), - TX_WIDGETS("ADX3-4"), + TX_WIDGETS("ADX3 TX1"), + TX_WIDGETS("ADX3 TX2"), + TX_WIDGETS("ADX3 TX3"), + TX_WIDGETS("ADX3 TX4"), TX_WIDGETS("AMX4"), - TX_WIDGETS("ADX4-1"), - TX_WIDGETS("ADX4-2"), - TX_WIDGETS("ADX4-3"), - TX_WIDGETS("ADX4-4"), + TX_WIDGETS("ADX4 TX1"), + TX_WIDGETS("ADX4 TX2"), + TX_WIDGETS("ADX4 TX3"), + TX_WIDGETS("ADX4 TX4"), TX_WIDGETS("DMIC4"), TX_WIDGETS("ARAD1"), }; @@ -1110,11 +1178,11 @@ static const struct snd_soc_dapm_widget tegra234_ahub_widgets[] = { { name " Mux", "SFC2", "SFC2 XBAR-RX" }, \ { name " Mux", "SFC3", "SFC3 XBAR-RX" }, \ { name " Mux", "SFC4", "SFC4 XBAR-RX" }, \ - { name " Mux", "MIXER1-1", "MIXER1-1 XBAR-RX" }, \ - { name " Mux", "MIXER1-2", "MIXER1-2 XBAR-RX" }, \ - { name " Mux", "MIXER1-3", "MIXER1-3 XBAR-RX" }, \ - { name " Mux", "MIXER1-4", "MIXER1-4 XBAR-RX" }, \ - { name " Mux", "MIXER1-5", "MIXER1-5 XBAR-RX" }, \ + { name " Mux", "MIXER1 
TX1", "MIXER1 TX1 XBAR-RX" }, \ + { name " Mux", "MIXER1 TX2", "MIXER1 TX2 XBAR-RX" }, \ + { name " Mux", "MIXER1 TX3", "MIXER1 TX3 XBAR-RX" }, \ + { name " Mux", "MIXER1 TX4", "MIXER1 TX4 XBAR-RX" }, \ + { name " Mux", "MIXER1 TX5", "MIXER1 TX5 XBAR-RX" }, \ { name " Mux", "AFC1", "AFC1 XBAR-RX" }, \ { name " Mux", "AFC2", "AFC2 XBAR-RX" }, \ { name " Mux", "AFC3", "AFC3 XBAR-RX" }, \ @@ -1132,15 +1200,15 @@ static const struct snd_soc_dapm_widget tegra234_ahub_widgets[] = { { name " Mux", "DMIC2", "DMIC2 XBAR-RX" }, \ { name " Mux", "DMIC3", "DMIC3 XBAR-RX" }, \ { name " Mux", "AMX1", "AMX1 XBAR-RX" }, \ - { name " Mux", "ADX1-1", "ADX1-1 XBAR-RX" }, \ - { name " Mux", "ADX1-2", "ADX1-2 XBAR-RX" }, \ - { name " Mux", "ADX1-3", "ADX1-3 XBAR-RX" }, \ - { name " Mux", "ADX1-4", "ADX1-4 XBAR-RX" }, \ + { name " Mux", "ADX1 TX1", "ADX1 TX1 XBAR-RX" }, \ + { name " Mux", "ADX1 TX2", "ADX1 TX2 XBAR-RX" }, \ + { name " Mux", "ADX1 TX3", "ADX1 TX3 XBAR-RX" }, \ + { name " Mux", "ADX1 TX4", "ADX1 TX4 XBAR-RX" }, \ { name " Mux", "AMX2", "AMX2 XBAR-RX" }, \ - { name " Mux", "ADX2-1", "ADX2-1 XBAR-RX" }, \ - { name " Mux", "ADX2-2", "ADX2-2 XBAR-RX" }, \ - { name " Mux", "ADX2-3", "ADX2-3 XBAR-RX" }, \ - { name " Mux", "ADX2-4", "ADX2-4 XBAR-RX" }, + { name " Mux", "ADX2 TX1", "ADX2 TX1 XBAR-RX" }, \ + { name " Mux", "ADX2 TX2", "ADX2 TX2 XBAR-RX" }, \ + { name " Mux", "ADX2 TX3", "ADX2 TX3 XBAR-RX" }, \ + { name " Mux", "ADX2 TX4", "ADX2 TX4 XBAR-RX" }, #define TEGRA210_ONLY_MUX_ROUTES(name) \ { name " Mux", "OPE2", "OPE2 XBAR-RX" }, @@ -1158,22 +1226,22 @@ static const struct snd_soc_dapm_widget tegra234_ahub_widgets[] = { { name " Mux", "ADMAIF20", "ADMAIF20 XBAR-RX" }, \ { name " Mux", "DMIC4", "DMIC4 XBAR-RX" }, \ { name " Mux", "I2S6", "I2S6 XBAR-RX" }, \ - { name " Mux", "ASRC1-1", "ASRC1-1 XBAR-RX" }, \ - { name " Mux", "ASRC1-2", "ASRC1-2 XBAR-RX" }, \ - { name " Mux", "ASRC1-3", "ASRC1-3 XBAR-RX" }, \ - { name " Mux", "ASRC1-4", "ASRC1-4 XBAR-RX" }, \ - { name " 
Mux", "ASRC1-5", "ASRC1-5 XBAR-RX" }, \ - { name " Mux", "ASRC1-6", "ASRC1-6 XBAR-RX" }, \ + { name " Mux", "ASRC1 TX1", "ASRC1 TX1 XBAR-RX" }, \ + { name " Mux", "ASRC1 TX2", "ASRC1 TX2 XBAR-RX" }, \ + { name " Mux", "ASRC1 TX3", "ASRC1 TX3 XBAR-RX" }, \ + { name " Mux", "ASRC1 TX4", "ASRC1 TX4 XBAR-RX" }, \ + { name " Mux", "ASRC1 TX5", "ASRC1 TX5 XBAR-RX" }, \ + { name " Mux", "ASRC1 TX6", "ASRC1 TX6 XBAR-RX" }, \ { name " Mux", "AMX3", "AMX3 XBAR-RX" }, \ - { name " Mux", "ADX3-1", "ADX3-1 XBAR-RX" }, \ - { name " Mux", "ADX3-2", "ADX3-2 XBAR-RX" }, \ - { name " Mux", "ADX3-3", "ADX3-3 XBAR-RX" }, \ - { name " Mux", "ADX3-4", "ADX3-4 XBAR-RX" }, \ + { name " Mux", "ADX3 TX1", "ADX3 TX1 XBAR-RX" }, \ + { name " Mux", "ADX3 TX2", "ADX3 TX2 XBAR-RX" }, \ + { name " Mux", "ADX3 TX3", "ADX3 TX3 XBAR-RX" }, \ + { name " Mux", "ADX3 TX4", "ADX3 TX4 XBAR-RX" }, \ { name " Mux", "AMX4", "AMX4 XBAR-RX" }, \ - { name " Mux", "ADX4-1", "ADX4-1 XBAR-RX" }, \ - { name " Mux", "ADX4-2", "ADX4-2 XBAR-RX" }, \ - { name " Mux", "ADX4-3", "ADX4-3 XBAR-RX" }, \ - { name " Mux", "ADX4-4", "ADX4-4 XBAR-RX" }, \ + { name " Mux", "ADX4 TX1", "ADX4 TX1 XBAR-RX" }, \ + { name " Mux", "ADX4 TX2", "ADX4 TX2 XBAR-RX" }, \ + { name " Mux", "ADX4 TX3", "ADX4 TX3 XBAR-RX" }, \ + { name " Mux", "ADX4 TX4", "ADX4 TX4 XBAR-RX" }, \ { name " Mux", "ARAD1", "ARAD1 XBAR-RX" }, #define TEGRA210_MUX_ROUTES(name) \ @@ -1223,16 +1291,21 @@ static const struct snd_soc_dapm_route tegra210_ahub_routes[] = { TEGRA210_MUX_ROUTES("SFC2") TEGRA210_MUX_ROUTES("SFC3") TEGRA210_MUX_ROUTES("SFC4") - TEGRA210_MUX_ROUTES("MIXER1-1") - TEGRA210_MUX_ROUTES("MIXER1-2") - TEGRA210_MUX_ROUTES("MIXER1-3") - TEGRA210_MUX_ROUTES("MIXER1-4") - TEGRA210_MUX_ROUTES("MIXER1-5") - TEGRA210_MUX_ROUTES("MIXER1-6") - TEGRA210_MUX_ROUTES("MIXER1-7") - TEGRA210_MUX_ROUTES("MIXER1-8") - TEGRA210_MUX_ROUTES("MIXER1-9") - TEGRA210_MUX_ROUTES("MIXER1-10") + TEGRA210_MUX_ROUTES("MIXER1 RX1") + TEGRA210_MUX_ROUTES("MIXER1 RX2") + 
TEGRA210_MUX_ROUTES("MIXER1 RX3") + TEGRA210_MUX_ROUTES("MIXER1 RX4") + TEGRA210_MUX_ROUTES("MIXER1 RX5") + TEGRA210_MUX_ROUTES("MIXER1 RX6") + TEGRA210_MUX_ROUTES("MIXER1 RX7") + TEGRA210_MUX_ROUTES("MIXER1 RX8") + TEGRA210_MUX_ROUTES("MIXER1 RX9") + TEGRA210_MUX_ROUTES("MIXER1 RX10") + IN_OUT_ROUTES("MIXER1 TX1") + IN_OUT_ROUTES("MIXER1 TX2") + IN_OUT_ROUTES("MIXER1 TX3") + IN_OUT_ROUTES("MIXER1 TX4") + IN_OUT_ROUTES("MIXER1 TX5") TEGRA210_MUX_ROUTES("AFC1") TEGRA210_MUX_ROUTES("AFC2") TEGRA210_MUX_ROUTES("AFC3") @@ -1244,14 +1317,14 @@ static const struct snd_soc_dapm_route tegra210_ahub_routes[] = { TEGRA210_MUX_ROUTES("SPKPROT1") TEGRA210_MUX_ROUTES("MVC1") TEGRA210_MUX_ROUTES("MVC2") - TEGRA210_MUX_ROUTES("AMX1-1") - TEGRA210_MUX_ROUTES("AMX1-2") - TEGRA210_MUX_ROUTES("AMX1-3") - TEGRA210_MUX_ROUTES("AMX1-4") - TEGRA210_MUX_ROUTES("AMX2-1") - TEGRA210_MUX_ROUTES("AMX2-2") - TEGRA210_MUX_ROUTES("AMX2-3") - TEGRA210_MUX_ROUTES("AMX2-4") + TEGRA210_MUX_ROUTES("AMX1 RX1") + TEGRA210_MUX_ROUTES("AMX1 RX2") + TEGRA210_MUX_ROUTES("AMX1 RX3") + TEGRA210_MUX_ROUTES("AMX1 RX4") + TEGRA210_MUX_ROUTES("AMX2 RX1") + TEGRA210_MUX_ROUTES("AMX2 RX2") + TEGRA210_MUX_ROUTES("AMX2 RX3") + TEGRA210_MUX_ROUTES("AMX2 RX4") TEGRA210_MUX_ROUTES("ADX1") TEGRA210_MUX_ROUTES("ADX2") IN_OUT_ROUTES("IQC1-1") @@ -1263,14 +1336,14 @@ static const struct snd_soc_dapm_route tegra210_ahub_routes[] = { IN_OUT_ROUTES("DMIC3") IN_OUT_ROUTES("AMX1") IN_OUT_ROUTES("AMX2") - IN_OUT_ROUTES("ADX1-1") - IN_OUT_ROUTES("ADX1-2") - IN_OUT_ROUTES("ADX1-3") - IN_OUT_ROUTES("ADX1-4") - IN_OUT_ROUTES("ADX2-1") - IN_OUT_ROUTES("ADX2-2") - IN_OUT_ROUTES("ADX2-3") - IN_OUT_ROUTES("ADX2-4") + IN_OUT_ROUTES("ADX1 TX1") + IN_OUT_ROUTES("ADX1 TX2") + IN_OUT_ROUTES("ADX1 TX3") + IN_OUT_ROUTES("ADX1 TX4") + IN_OUT_ROUTES("ADX2 TX1") + IN_OUT_ROUTES("ADX2 TX2") + IN_OUT_ROUTES("ADX2 TX3") + IN_OUT_ROUTES("ADX2 TX4") }; static const struct snd_soc_dapm_route tegra186_ahub_routes[] = { @@ -1315,16 +1388,21 @@ static 
const struct snd_soc_dapm_route tegra186_ahub_routes[] = { TEGRA186_MUX_ROUTES("SFC2") TEGRA186_MUX_ROUTES("SFC3") TEGRA186_MUX_ROUTES("SFC4") - TEGRA186_MUX_ROUTES("MIXER1-1") - TEGRA186_MUX_ROUTES("MIXER1-2") - TEGRA186_MUX_ROUTES("MIXER1-3") - TEGRA186_MUX_ROUTES("MIXER1-4") - TEGRA186_MUX_ROUTES("MIXER1-5") - TEGRA186_MUX_ROUTES("MIXER1-6") - TEGRA186_MUX_ROUTES("MIXER1-7") - TEGRA186_MUX_ROUTES("MIXER1-8") - TEGRA186_MUX_ROUTES("MIXER1-9") - TEGRA186_MUX_ROUTES("MIXER1-10") + TEGRA186_MUX_ROUTES("MIXER1 RX1") + TEGRA186_MUX_ROUTES("MIXER1 RX2") + TEGRA186_MUX_ROUTES("MIXER1 RX3") + TEGRA186_MUX_ROUTES("MIXER1 RX4") + TEGRA186_MUX_ROUTES("MIXER1 RX5") + TEGRA186_MUX_ROUTES("MIXER1 RX6") + TEGRA186_MUX_ROUTES("MIXER1 RX7") + TEGRA186_MUX_ROUTES("MIXER1 RX8") + TEGRA186_MUX_ROUTES("MIXER1 RX9") + TEGRA186_MUX_ROUTES("MIXER1 RX10") + IN_OUT_ROUTES("MIXER1 TX1") + IN_OUT_ROUTES("MIXER1 TX2") + IN_OUT_ROUTES("MIXER1 TX3") + IN_OUT_ROUTES("MIXER1 TX4") + IN_OUT_ROUTES("MIXER1 TX5") TEGRA186_MUX_ROUTES("AFC1") TEGRA186_MUX_ROUTES("AFC2") TEGRA186_MUX_ROUTES("AFC3") @@ -1335,14 +1413,14 @@ static const struct snd_soc_dapm_route tegra186_ahub_routes[] = { TEGRA186_MUX_ROUTES("SPKPROT1") TEGRA186_MUX_ROUTES("MVC1") TEGRA186_MUX_ROUTES("MVC2") - TEGRA186_MUX_ROUTES("AMX1-1") - TEGRA186_MUX_ROUTES("AMX1-2") - TEGRA186_MUX_ROUTES("AMX1-3") - TEGRA186_MUX_ROUTES("AMX1-4") - TEGRA186_MUX_ROUTES("AMX2-1") - TEGRA186_MUX_ROUTES("AMX2-2") - TEGRA186_MUX_ROUTES("AMX2-3") - TEGRA186_MUX_ROUTES("AMX2-4") + TEGRA186_MUX_ROUTES("AMX1 RX1") + TEGRA186_MUX_ROUTES("AMX1 RX2") + TEGRA186_MUX_ROUTES("AMX1 RX3") + TEGRA186_MUX_ROUTES("AMX1 RX4") + TEGRA186_MUX_ROUTES("AMX2 RX1") + TEGRA186_MUX_ROUTES("AMX2 RX2") + TEGRA186_MUX_ROUTES("AMX2 RX3") + TEGRA186_MUX_ROUTES("AMX2 RX4") TEGRA186_MUX_ROUTES("ADX1") TEGRA186_MUX_ROUTES("ADX2") IN_OUT_ROUTES("IQC1-1") @@ -1354,14 +1432,14 @@ static const struct snd_soc_dapm_route tegra186_ahub_routes[] = { IN_OUT_ROUTES("DMIC3") IN_OUT_ROUTES("AMX1") 
IN_OUT_ROUTES("AMX2") - IN_OUT_ROUTES("ADX1-1") - IN_OUT_ROUTES("ADX1-2") - IN_OUT_ROUTES("ADX1-3") - IN_OUT_ROUTES("ADX1-4") - IN_OUT_ROUTES("ADX2-1") - IN_OUT_ROUTES("ADX2-2") - IN_OUT_ROUTES("ADX2-3") - IN_OUT_ROUTES("ADX2-4") + IN_OUT_ROUTES("ADX1 TX1") + IN_OUT_ROUTES("ADX1 TX2") + IN_OUT_ROUTES("ADX1 TX3") + IN_OUT_ROUTES("ADX1 TX4") + IN_OUT_ROUTES("ADX2 TX1") + IN_OUT_ROUTES("ADX2 TX2") + IN_OUT_ROUTES("ADX2 TX3") + IN_OUT_ROUTES("ADX2 TX4") TEGRA186_MUX_ROUTES("ADMAIF11") TEGRA186_MUX_ROUTES("ADMAIF12") TEGRA186_MUX_ROUTES("ADMAIF13") @@ -1372,37 +1450,43 @@ static const struct snd_soc_dapm_route tegra186_ahub_routes[] = { TEGRA186_MUX_ROUTES("ADMAIF18") TEGRA186_MUX_ROUTES("ADMAIF19") TEGRA186_MUX_ROUTES("ADMAIF20") - TEGRA186_MUX_ROUTES("AMX3-1") - TEGRA186_MUX_ROUTES("AMX3-2") - TEGRA186_MUX_ROUTES("AMX3-3") - TEGRA186_MUX_ROUTES("AMX3-4") - TEGRA186_MUX_ROUTES("AMX4-1") - TEGRA186_MUX_ROUTES("AMX4-2") - TEGRA186_MUX_ROUTES("AMX4-3") - TEGRA186_MUX_ROUTES("AMX4-4") + TEGRA186_MUX_ROUTES("AMX3 RX1") + TEGRA186_MUX_ROUTES("AMX3 RX2") + TEGRA186_MUX_ROUTES("AMX3 RX3") + TEGRA186_MUX_ROUTES("AMX3 RX4") + TEGRA186_MUX_ROUTES("AMX4 RX1") + TEGRA186_MUX_ROUTES("AMX4 RX2") + TEGRA186_MUX_ROUTES("AMX4 RX3") + TEGRA186_MUX_ROUTES("AMX4 RX4") TEGRA186_MUX_ROUTES("ADX3") TEGRA186_MUX_ROUTES("ADX4") TEGRA186_MUX_ROUTES("I2S6") - TEGRA186_MUX_ROUTES("ASRC1-1") - TEGRA186_MUX_ROUTES("ASRC1-2") - TEGRA186_MUX_ROUTES("ASRC1-3") - TEGRA186_MUX_ROUTES("ASRC1-4") - TEGRA186_MUX_ROUTES("ASRC1-5") - TEGRA186_MUX_ROUTES("ASRC1-6") - TEGRA186_MUX_ROUTES("ASRC1-7") + TEGRA186_MUX_ROUTES("ASRC1 RX1") + TEGRA186_MUX_ROUTES("ASRC1 RX2") + TEGRA186_MUX_ROUTES("ASRC1 RX3") + TEGRA186_MUX_ROUTES("ASRC1 RX4") + TEGRA186_MUX_ROUTES("ASRC1 RX5") + TEGRA186_MUX_ROUTES("ASRC1 RX6") + TEGRA186_MUX_ROUTES("ASRC1 RX7") + IN_OUT_ROUTES("ASRC1 TX1") + IN_OUT_ROUTES("ASRC1 TX2") + IN_OUT_ROUTES("ASRC1 TX3") + IN_OUT_ROUTES("ASRC1 TX4") + IN_OUT_ROUTES("ASRC1 TX5") + IN_OUT_ROUTES("ASRC1 TX6") 
TEGRA186_MUX_ROUTES("DSPK1") TEGRA186_MUX_ROUTES("DSPK2") IN_OUT_ROUTES("DMIC4") IN_OUT_ROUTES("AMX3") IN_OUT_ROUTES("AMX4") - IN_OUT_ROUTES("ADX3-1") - IN_OUT_ROUTES("ADX3-2") - IN_OUT_ROUTES("ADX3-3") - IN_OUT_ROUTES("ADX3-4") - IN_OUT_ROUTES("ADX4-1") - IN_OUT_ROUTES("ADX4-2") - IN_OUT_ROUTES("ADX4-3") - IN_OUT_ROUTES("ADX4-4") + IN_OUT_ROUTES("ADX3 TX1") + IN_OUT_ROUTES("ADX3 TX2") + IN_OUT_ROUTES("ADX3 TX3") + IN_OUT_ROUTES("ADX3 TX4") + IN_OUT_ROUTES("ADX4 TX1") + IN_OUT_ROUTES("ADX4 TX2") + IN_OUT_ROUTES("ADX4 TX3") + IN_OUT_ROUTES("ADX4 TX4") IN_OUT_ROUTES("ARAD1") }; diff --git a/kernel/kernel-5.10/sound/soc/tegra/tegra210_mbdrc.c b/kernel/kernel-5.10/sound/soc/tegra/tegra210_mbdrc.c index d1d18b70c5..fd947a6e67 100644 --- a/kernel/kernel-5.10/sound/soc/tegra/tegra210_mbdrc.c +++ b/kernel/kernel-5.10/sound/soc/tegra/tegra210_mbdrc.c @@ -2,7 +2,7 @@ // // tegra210_mbdrc.c - Tegra210 MBDRC driver // -// Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved. +// Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved. 
#include #include @@ -430,56 +430,56 @@ static const struct soc_enum tegra210_mbdrc_frame_size_enum = static const DECLARE_TLV_DB_MINMAX(mdbrc_vol_tlv, -25600, 25500); static const struct snd_kcontrol_new tegra210_mbdrc_controls[] = { - SOC_ENUM_EXT("mbdrc peak-rms mode", tegra210_mbdrc_peak_rms_enum, + SOC_ENUM_EXT("MBDRC Peak RMS Mode", tegra210_mbdrc_peak_rms_enum, tegra210_mbdrc_get_enum, tegra210_mbdrc_put_enum), - SOC_ENUM_EXT("mbdrc filter structure", + SOC_ENUM_EXT("MBDRC Filter Structure", tegra210_mbdrc_filter_structure_enum, tegra210_mbdrc_get_enum, tegra210_mbdrc_put_enum), - SOC_ENUM_EXT("mbdrc frame size", tegra210_mbdrc_frame_size_enum, + SOC_ENUM_EXT("MBDRC Frame Size", tegra210_mbdrc_frame_size_enum, tegra210_mbdrc_get_enum, tegra210_mbdrc_put_enum), - SOC_ENUM_EXT("mbdrc mode", tegra210_mbdrc_mode_enum, + SOC_ENUM_EXT("MBDRC Mode", tegra210_mbdrc_mode_enum, tegra210_mbdrc_get_enum, tegra210_mbdrc_put_enum), - SOC_SINGLE_EXT("mbdrc rms offset", TEGRA210_MBDRC_CONFIG, + SOC_SINGLE_EXT("MBDRC RMS Offset", TEGRA210_MBDRC_CONFIG, TEGRA210_MBDRC_CONFIG_RMS_OFFSET_SHIFT, 0x1ff, 0, tegra210_mbdrc_get, tegra210_mbdrc_put), - SOC_SINGLE_EXT("mbdrc shift control", TEGRA210_MBDRC_CONFIG, + SOC_SINGLE_EXT("MBDRC Shift Control", TEGRA210_MBDRC_CONFIG, TEGRA210_MBDRC_CONFIG_SHIFT_CTRL_SHIFT, 0x1f, 0, tegra210_mbdrc_get, tegra210_mbdrc_put), - SOC_SINGLE_EXT("mbdrc fast attack factor", TEGRA210_MBDRC_FAST_FACTOR, + SOC_SINGLE_EXT("MBDRC Fast Attack Factor", TEGRA210_MBDRC_FAST_FACTOR, TEGRA210_MBDRC_FAST_FACTOR_ATTACK_SHIFT, 0xffff, 0, tegra210_mbdrc_get, tegra210_mbdrc_put), - SOC_SINGLE_EXT("mbdrc fast release factor", TEGRA210_MBDRC_FAST_FACTOR, + SOC_SINGLE_EXT("MBDRC Fast Release Factor", TEGRA210_MBDRC_FAST_FACTOR, TEGRA210_MBDRC_FAST_FACTOR_RELEASE_SHIFT, 0xffff, 0, tegra210_mbdrc_get, tegra210_mbdrc_put), - SOC_SINGLE_RANGE_EXT_TLV("mbdrc master volume", TEGRA210_MBDRC_MASTER_VOLUME, + SOC_SINGLE_RANGE_EXT_TLV("MBDRC Master Volume", 
TEGRA210_MBDRC_MASTER_VOLUME, TEGRA210_MBDRC_MASTER_VOLUME_SHIFT, TEGRA210_MBDRC_MASTER_VOL_MIN, TEGRA210_MBDRC_MASTER_VOL_MAX, 0, tegra210_mbdrc_vol_get, tegra210_mbdrc_vol_put, mdbrc_vol_tlv), - TEGRA_SOC_BYTES_EXT("mbdrc iir stages", TEGRA210_MBDRC_IIR_CONFIG, + TEGRA_SOC_BYTES_EXT("MBDRC IIR Stages", TEGRA210_MBDRC_IIR_CONFIG, TEGRA210_MBDRC_FILTER_COUNT, TEGRA210_MBDRC_IIR_CONFIG_NUM_STAGES_SHIFT, TEGRA210_MBDRC_IIR_CONFIG_NUM_STAGES_MASK, tegra210_mbdrc_band_params_get, tegra210_mbdrc_band_params_put, tegra210_mbdrc_param_info), - TEGRA_SOC_BYTES_EXT("mbdrc in attack tc", TEGRA210_MBDRC_IN_ATTACK, + TEGRA_SOC_BYTES_EXT("MBDRC In Attack Time Const", TEGRA210_MBDRC_IN_ATTACK, TEGRA210_MBDRC_FILTER_COUNT, TEGRA210_MBDRC_IN_ATTACK_TC_SHIFT, TEGRA210_MBDRC_IN_ATTACK_TC_MASK, tegra210_mbdrc_band_params_get, tegra210_mbdrc_band_params_put, tegra210_mbdrc_param_info), - TEGRA_SOC_BYTES_EXT("mbdrc in release tc", TEGRA210_MBDRC_IN_RELEASE, + TEGRA_SOC_BYTES_EXT("MBDRC In Release Time Const", TEGRA210_MBDRC_IN_RELEASE, TEGRA210_MBDRC_FILTER_COUNT, TEGRA210_MBDRC_IN_RELEASE_TC_SHIFT, TEGRA210_MBDRC_IN_RELEASE_TC_MASK, tegra210_mbdrc_band_params_get, tegra210_mbdrc_band_params_put, tegra210_mbdrc_param_info), - TEGRA_SOC_BYTES_EXT("mbdrc fast attack tc", TEGRA210_MBDRC_FAST_ATTACK, + TEGRA_SOC_BYTES_EXT("MBDRC Fast Attack Time Const", TEGRA210_MBDRC_FAST_ATTACK, TEGRA210_MBDRC_FILTER_COUNT, TEGRA210_MBDRC_FAST_ATTACK_TC_SHIFT, TEGRA210_MBDRC_FAST_ATTACK_TC_MASK, @@ -487,48 +487,48 @@ static const struct snd_kcontrol_new tegra210_mbdrc_controls[] = { tegra210_mbdrc_band_params_put, tegra210_mbdrc_param_info), - TEGRA_SOC_BYTES_EXT("mbdrc in threshold", TEGRA210_MBDRC_IN_THRESHOLD, + TEGRA_SOC_BYTES_EXT("MBDRC In Threshold", TEGRA210_MBDRC_IN_THRESHOLD, TEGRA210_MBDRC_FILTER_COUNT * 4, 0, 0xffffffff, tegra210_mbdrc_threshold_get, tegra210_mbdrc_threshold_put, tegra210_mbdrc_param_info), - TEGRA_SOC_BYTES_EXT("mbdrc out threshold", TEGRA210_MBDRC_OUT_THRESHOLD, + 
TEGRA_SOC_BYTES_EXT("MBDRC Out Threshold", TEGRA210_MBDRC_OUT_THRESHOLD, TEGRA210_MBDRC_FILTER_COUNT * 4, 0, 0xffffffff, tegra210_mbdrc_threshold_get, tegra210_mbdrc_threshold_put, tegra210_mbdrc_param_info), - TEGRA_SOC_BYTES_EXT("mbdrc ratio", TEGRA210_MBDRC_RATIO_1ST, + TEGRA_SOC_BYTES_EXT("MBDRC Ratio", TEGRA210_MBDRC_RATIO_1ST, TEGRA210_MBDRC_FILTER_COUNT * 5, TEGRA210_MBDRC_RATIO_1ST_SHIFT, TEGRA210_MBDRC_RATIO_1ST_MASK, tegra210_mbdrc_band_params_get, tegra210_mbdrc_band_params_put, tegra210_mbdrc_param_info), - TEGRA_SOC_BYTES_EXT("mbdrc makeup gain", TEGRA210_MBDRC_MAKEUP_GAIN, + TEGRA_SOC_BYTES_EXT("MBDRC Makeup Gain", TEGRA210_MBDRC_MAKEUP_GAIN, TEGRA210_MBDRC_FILTER_COUNT, TEGRA210_MBDRC_MAKEUP_GAIN_SHIFT, TEGRA210_MBDRC_MAKEUP_GAIN_MASK, tegra210_mbdrc_band_params_get, tegra210_mbdrc_band_params_put, tegra210_mbdrc_param_info), - TEGRA_SOC_BYTES_EXT("mbdrc init gain", TEGRA210_MBDRC_INIT_GAIN, + TEGRA_SOC_BYTES_EXT("MBDRC Init Gain", TEGRA210_MBDRC_INIT_GAIN, TEGRA210_MBDRC_FILTER_COUNT, TEGRA210_MBDRC_INIT_GAIN_SHIFT, TEGRA210_MBDRC_INIT_GAIN_MASK, tegra210_mbdrc_band_params_get, tegra210_mbdrc_band_params_put, tegra210_mbdrc_param_info), - TEGRA_SOC_BYTES_EXT("mbdrc attack gain", TEGRA210_MBDRC_GAIN_ATTACK, + TEGRA_SOC_BYTES_EXT("MBDRC Attack Gain", TEGRA210_MBDRC_GAIN_ATTACK, TEGRA210_MBDRC_FILTER_COUNT, TEGRA210_MBDRC_GAIN_ATTACK_SHIFT, TEGRA210_MBDRC_GAIN_ATTACK_MASK, tegra210_mbdrc_band_params_get, tegra210_mbdrc_band_params_put, tegra210_mbdrc_param_info), - TEGRA_SOC_BYTES_EXT("mbdrc release gain", TEGRA210_MBDRC_GAIN_RELEASE, + TEGRA_SOC_BYTES_EXT("MBDRC Release Gain", TEGRA210_MBDRC_GAIN_RELEASE, TEGRA210_MBDRC_FILTER_COUNT, TEGRA210_MBDRC_GAIN_RELEASE_SHIFT, TEGRA210_MBDRC_GAIN_RELEASE_MASK, tegra210_mbdrc_band_params_get, tegra210_mbdrc_band_params_put, tegra210_mbdrc_param_info), - TEGRA_SOC_BYTES_EXT("mbdrc fast release gain", + TEGRA_SOC_BYTES_EXT("MBDRC Fast Release Gain", TEGRA210_MBDRC_FAST_RELEASE, TEGRA210_MBDRC_FILTER_COUNT, 
TEGRA210_MBDRC_FAST_RELEASE_SHIFT, @@ -537,20 +537,20 @@ static const struct snd_kcontrol_new tegra210_mbdrc_controls[] = { tegra210_mbdrc_band_params_put, tegra210_mbdrc_param_info), - TEGRA_SOC_BYTES_EXT("mbdrc low band biquad coeffs", + TEGRA_SOC_BYTES_EXT("MBDRC Low Band Biquad Coeffs", TEGRA210_MBDRC_AHUBRAMCTL_CONFIG_RAM_CTRL, TEGRA210_MBDRC_MAX_BIQUAD_STAGES * 5, 0, 0xffffffff, tegra210_mbdrc_biquad_coeffs_get, tegra210_mbdrc_biquad_coeffs_put, tegra210_mbdrc_param_info), - TEGRA_SOC_BYTES_EXT("mbdrc mid band biquad coeffs", + TEGRA_SOC_BYTES_EXT("MBDRC Mid Band Biquad Coeffs", TEGRA210_MBDRC_AHUBRAMCTL_CONFIG_RAM_CTRL + TEGRA210_MBDRC_FILTER_PARAM_STRIDE, TEGRA210_MBDRC_MAX_BIQUAD_STAGES * 5, 0, 0xffffffff, tegra210_mbdrc_biquad_coeffs_get, tegra210_mbdrc_biquad_coeffs_put, tegra210_mbdrc_param_info), - TEGRA_SOC_BYTES_EXT("mbdrc high band biquad coeffs", + TEGRA_SOC_BYTES_EXT("MBDRC High Band Biquad Coeffs", TEGRA210_MBDRC_AHUBRAMCTL_CONFIG_RAM_CTRL + (TEGRA210_MBDRC_FILTER_PARAM_STRIDE * 2), TEGRA210_MBDRC_MAX_BIQUAD_STAGES * 5, 0, 0xffffffff, diff --git a/kernel/kernel-5.10/sound/soc/tegra/tegra210_peq.c b/kernel/kernel-5.10/sound/soc/tegra/tegra210_peq.c index 25f529c4de..974351db18 100644 --- a/kernel/kernel-5.10/sound/soc/tegra/tegra210_peq.c +++ b/kernel/kernel-5.10/sound/soc/tegra/tegra210_peq.c @@ -2,7 +2,7 @@ // // tegra210_peq.c - Tegra210 PEQ driver // -// Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved. +// Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved. 
#include #include @@ -150,7 +150,7 @@ static int tegra210_peq_param_info(struct snd_kcontrol *kcontrol, } #define TEGRA210_PEQ_GAIN_PARAMS_CTRL(chan) \ - TEGRA_SOC_BYTES_EXT("peq channel" #chan " biquad gain params", \ + TEGRA_SOC_BYTES_EXT("PEQ Channel-" #chan " biquad gain params", \ TEGRA210_PEQ_AHUBRAMCTL_CONFIG_RAM_CTRL, \ TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH, \ (TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH * chan), 0xffffffff, \ @@ -158,7 +158,7 @@ static int tegra210_peq_param_info(struct snd_kcontrol *kcontrol, tegra210_peq_param_info) #define TEGRA210_PEQ_SHIFT_PARAMS_CTRL(chan) \ - TEGRA_SOC_BYTES_EXT("peq channel" #chan " biquad shift params", \ + TEGRA_SOC_BYTES_EXT("PEQ Channel-" #chan " biquad shift params", \ TEGRA210_PEQ_AHUBRAMCTL_CONFIG_RAM_SHIFT_CTRL, \ TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH, \ (TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH * chan), 0x1f, \ @@ -166,10 +166,10 @@ static int tegra210_peq_param_info(struct snd_kcontrol *kcontrol, tegra210_peq_param_info) static const struct snd_kcontrol_new tegra210_peq_controls[] = { - SOC_SINGLE_EXT("peq active", TEGRA210_PEQ_CONFIG, + SOC_SINGLE_EXT("PEQ Active", TEGRA210_PEQ_CONFIG, TEGRA210_PEQ_CONFIG_MODE_SHIFT, 1, 0, tegra210_peq_get, tegra210_peq_put), - SOC_SINGLE_EXT("peq biquad stages", TEGRA210_PEQ_CONFIG, + SOC_SINGLE_EXT("PEQ Biquad Stages", TEGRA210_PEQ_CONFIG, TEGRA210_PEQ_CONFIG_BIQUAD_STAGES_SHIFT, TEGRA210_PEQ_MAX_BIQUAD_STAGES - 1, 0, tegra210_peq_get, tegra210_peq_put), diff --git a/kernel/nvethernetrm/include/config.tmk b/kernel/nvethernetrm/include/config.tmk new file mode 100644 index 0000000000..8df4e70db0 --- /dev/null +++ b/kernel/nvethernetrm/include/config.tmk @@ -0,0 +1,44 @@ +# copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+# +############################################################################### + +# Set the Makefile config macros to zero by default +OSI_STRIPPED_LIB := 0 +OSI_DEBUG := 0 +DEBUG_MACSEC := 0 + +ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY),1) + NV_COMPONENT_CFLAGS += -DOSI_STRIPPED_LIB + OSI_STRIPPED_LIB := 1 +else + NV_COMPONENT_CFLAGS += -DOSI_DEBUG + NV_COMPONENT_CFLAGS += -DDEBUG_MACSEC + OSI_DEBUG := 1 + DEBUG_MACSEC := 1 +endif +NV_COMPONENT_CFLAGS += -DHSI_SUPPORT +NV_COMPONENT_CFLAGS += -DMACSEC_SUPPORT +NV_COMPONENT_CFLAGS += -DLOG_OSI + +#NV_COMPONENT_CFLAGS += -DMACSEC_KEY_PROGRAM +HSI_SUPPORT := 1 +MACSEC_SUPPORT := 1 +ccflags-y += $(NV_COMPONENT_CFLAGS) diff --git a/kernel/nvethernetrm/include/ivc_core.h b/kernel/nvethernetrm/include/ivc_core.h index e8da34fadf..dc3d3726de 100644 --- a/kernel/nvethernetrm/include/ivc_core.h +++ b/kernel/nvethernetrm/include/ivc_core.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -38,7 +38,7 @@ /** * @brief IVC commands between OSD & OSI. */ -typedef enum ivc_cmd { +typedef enum { core_init = 1, core_deinit, write_phy_reg, @@ -46,8 +46,7 @@ typedef enum ivc_cmd { handle_ioctl, init_macsec, deinit_macsec, - handle_ns_irq_macsec, - handle_s_irq_macsec, + handle_irq_macsec, lut_config_macsec, kt_config_macsec, cipher_config, @@ -58,13 +57,15 @@ typedef enum ivc_cmd { dbg_buf_config_macsec, dbg_events_config_macsec, macsec_get_sc_lut_key_index, - macsec_update_mtu_size, + nvethmgr_get_status, + nvethmgr_verify_ts, + nvethmgr_get_avb_perf, }ivc_cmd; /** * @brief IVC arguments structure. 
*/ -typedef struct ivc_args { +typedef struct { /** Number of arguments */ nveu32_t count; /** arguments */ @@ -74,7 +75,7 @@ typedef struct ivc_args { /** * @brief IVC core argument structure. */ -typedef struct ivc_core_args { +typedef struct { /** Number of MTL queues enabled in MAC */ nveu32_t num_mtl_queues; /** Array of MTL queues */ @@ -85,8 +86,6 @@ typedef struct ivc_core_args { nveu32_t rxq_prio[OSI_EQOS_MAX_NUM_CHANS]; /** Ethernet MAC address */ nveu8_t mac_addr[OSI_ETH_ALEN]; - /** Tegra Pre-si platform info */ - nveu32_t pre_si; /** VLAN tag stripping enable(1) or disable(0) */ nveu32_t strip_vlan_tag; /** pause frame support */ @@ -103,15 +102,15 @@ typedef struct ivc_core_args { * @brief macsec config structure. */ #ifdef MACSEC_SUPPORT -typedef struct macsec_config { +typedef struct { /** MACsec secure channel basic information */ struct osi_macsec_sc_info sc_info; /** MACsec enable or disable */ - unsigned int enable; + nveu32_t enable; /** MACsec controller */ - unsigned short ctlr; + nveu16_t ctlr; /** MACsec KT index */ - unsigned short kt_idx; + nveu16_t kt_idx; /** MACsec KT index */ nveu32_t key_index; /** MACsec SCI */ @@ -133,19 +132,20 @@ typedef struct ivc_msg_common { /** message count, used for debug */ nveu32_t count; + /** IVC argument structure */ + ivc_args args; + union { - /** IVC argument structure */ - ivc_args args; -#ifndef OSI_STRIPPED_LIB /** avb algorithm structure */ struct osi_core_avb_algorithm avb_algo; -#endif /** OSI filter structure */ struct osi_filter filter; /** OSI HW features */ struct osi_hw_features hw_feat; /** MMC counters */ - struct osi_mmc_counters mmc; + struct osi_mmc_counters mmc_s; + /** OSI stats counters */ + struct osi_stats stats_s; /** core argument structure */ ivc_core_args init_args; /** ioctl command structure */ @@ -186,14 +186,4 @@ typedef struct ivc_msg_common { */ nve32_t osd_ivc_send_cmd(void *priv, ivc_msg_common_t *ivc_buf, nveu32_t len); - -/** - * @brief ivc_get_core_safety_config - 
Get core safety config - * - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: Yes - */ -void *ivc_get_core_safety_config(void); #endif /* IVC_CORE_H */ diff --git a/kernel/nvethernetrm/include/mmc.h b/kernel/nvethernetrm/include/mmc.h index 0d3c7ab021..779eb0cd82 100644 --- a/kernel/nvethernetrm/include/mmc.h +++ b/kernel/nvethernetrm/include/mmc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,568 +23,9 @@ #ifndef INCLUDED_MMC_H #define INCLUDED_MMC_H -#include "../osi/common/type.h" +#include #include "osi_common.h" -/** - * @brief osi_mmc_counters - The structure to hold RMON counter values - */ -struct osi_mmc_counters { - /** This counter provides the number of bytes transmitted, exclusive of - * preamble and retried bytes, in good and bad packets */ - nveu64_t mmc_tx_octetcount_gb; - /** This counter provides upper 32 bits of transmitted octet count */ - nveu64_t mmc_tx_octetcount_gb_h; - /** This counter provides the number of good and - * bad packets transmitted, exclusive of retried packets */ - nveu64_t mmc_tx_framecount_gb; - /** This counter provides upper 32 bits of transmitted good and bad - * packets count */ - nveu64_t mmc_tx_framecount_gb_h; - /** This counter provides number of good broadcast - * packets transmitted */ - nveu64_t mmc_tx_broadcastframe_g; - /** This counter provides upper 32 bits of transmitted good broadcast - * packets count */ - nveu64_t mmc_tx_broadcastframe_g_h; - /** This counter provides number of good multicast - * packets transmitted */ - nveu64_t mmc_tx_multicastframe_g; - /** This counter provides upper 32 bits of transmitted good multicast - * packet count */ - nveu64_t mmc_tx_multicastframe_g_h; - /** This counter 
provides the number of good and bad packets - * transmitted with length 64 bytes, exclusive of preamble and - * retried packets */ - nveu64_t mmc_tx_64_octets_gb; - /** This counter provides upper 32 bits of transmitted 64 octet size - * good and bad packets count */ - nveu64_t mmc_tx_64_octets_gb_h; - /** This counter provides the number of good and bad packets - * transmitted with length 65-127 bytes, exclusive of preamble and - * retried packets */ - nveu64_t mmc_tx_65_to_127_octets_gb; - /** Provides upper 32 bits of transmitted 65-to-127 octet size good and - * bad packets count */ - nveu64_t mmc_tx_65_to_127_octets_gb_h; - /** This counter provides the number of good and bad packets - * transmitted with length 128-255 bytes, exclusive of preamble and - * retried packets */ - nveu64_t mmc_tx_128_to_255_octets_gb; - /** This counter provides upper 32 bits of transmitted 128-to-255 - * octet size good and bad packets count */ - nveu64_t mmc_tx_128_to_255_octets_gb_h; - /** This counter provides the number of good and bad packets - * transmitted with length 256-511 bytes, exclusive of preamble and - * retried packets */ - nveu64_t mmc_tx_256_to_511_octets_gb; - /** This counter provides upper 32 bits of transmitted 256-to-511 - * octet size good and bad packets count. 
*/ - nveu64_t mmc_tx_256_to_511_octets_gb_h; - /** This counter provides the number of good and bad packets - * transmitted with length 512-1023 bytes, exclusive of preamble and - * retried packets */ - nveu64_t mmc_tx_512_to_1023_octets_gb; - /** This counter provides upper 32 bits of transmitted 512-to-1023 - * octet size good and bad packets count.*/ - nveu64_t mmc_tx_512_to_1023_octets_gb_h; - /** This counter provides the number of good and bad packets - * transmitted with length 1024-max bytes, exclusive of preamble and - * retried packets */ - nveu64_t mmc_tx_1024_to_max_octets_gb; - /** This counter provides upper 32 bits of transmitted 1024-tomaxsize - * octet size good and bad packets count. */ - nveu64_t mmc_tx_1024_to_max_octets_gb_h; - /** This counter provides the number of good and bad unicast packets */ - nveu64_t mmc_tx_unicast_gb; - /** This counter provides upper 32 bits of transmitted good bad - * unicast packets count */ - nveu64_t mmc_tx_unicast_gb_h; - /** This counter provides the number of good and bad - * multicast packets */ - nveu64_t mmc_tx_multicast_gb; - /** This counter provides upper 32 bits of transmitted good bad - * multicast packets count */ - nveu64_t mmc_tx_multicast_gb_h; - /** This counter provides the number of good and bad - * broadcast packets */ - nveu64_t mmc_tx_broadcast_gb; - /** This counter provides upper 32 bits of transmitted good bad - * broadcast packets count */ - nveu64_t mmc_tx_broadcast_gb_h; - /** This counter provides the number of abort packets due to - * underflow error */ - nveu64_t mmc_tx_underflow_error; - /** This counter provides upper 32 bits of abort packets due to - * underflow error */ - nveu64_t mmc_tx_underflow_error_h; - /** This counter provides the number of successfully transmitted - * packets after a single collision in the half-duplex mode */ - nveu64_t mmc_tx_singlecol_g; - /** This counter provides the number of successfully transmitted - * packets after a multi collision in the 
half-duplex mode */ - nveu64_t mmc_tx_multicol_g; - /** This counter provides the number of successfully transmitted - * after a deferral in the half-duplex mode */ - nveu64_t mmc_tx_deferred; - /** This counter provides the number of packets aborted because of - * late collision error */ - nveu64_t mmc_tx_latecol; - /** This counter provides the number of packets aborted because of - * excessive (16) collision errors */ - nveu64_t mmc_tx_exesscol; - /** This counter provides the number of packets aborted because of - * carrier sense error (no carrier or loss of carrier) */ - nveu64_t mmc_tx_carrier_error; - /** This counter provides the number of bytes transmitted, - * exclusive of preamble, only in good packets */ - nveu64_t mmc_tx_octetcount_g; - /** This counter provides upper 32 bytes of bytes transmitted, - * exclusive of preamble, only in good packets */ - nveu64_t mmc_tx_octetcount_g_h; - /** This counter provides the number of good packets transmitted */ - nveu64_t mmc_tx_framecount_g; - /** This counter provides upper 32 bytes of good packets transmitted */ - nveu64_t mmc_tx_framecount_g_h; - /** This counter provides the number of packets aborted because of - * excessive deferral error - * (deferred for more than two max-sized packet times) */ - nveu64_t mmc_tx_excessdef; - /** This counter provides the number of good Pause - * packets transmitted */ - nveu64_t mmc_tx_pause_frame; - /** This counter provides upper 32 bytes of good Pause - * packets transmitted */ - nveu64_t mmc_tx_pause_frame_h; - /** This counter provides the number of good VLAN packets transmitted */ - nveu64_t mmc_tx_vlan_frame_g; - /** This counter provides upper 32 bytes of good VLAN packets - * transmitted */ - nveu64_t mmc_tx_vlan_frame_g_h; - /** This counter provides the number of packets transmitted without - * errors and with length greater than the maxsize (1,518 or 1,522 bytes - * for VLAN tagged packets; 2000 bytes */ - nveu64_t mmc_tx_osize_frame_g; - /** This counter 
provides the number of good and bad packets received */ - nveu64_t mmc_rx_framecount_gb; - /** This counter provides upper 32 bytes of good and bad packets - * received */ - nveu64_t mmc_rx_framecount_gb_h; - /** This counter provides the number of bytes received by DWC_ther_qos, - * exclusive of preamble, in good and bad packets */ - nveu64_t mmc_rx_octetcount_gb; - /** This counter provides upper 32 bytes of bytes received by - * DWC_ether_qos, exclusive of preamble, in good and bad packets */ - nveu64_t mmc_rx_octetcount_gb_h; - /** This counter provides the number of bytes received by DWC_ether_qos, - * exclusive of preamble, in good and bad packets */ - nveu64_t mmc_rx_octetcount_g; - /** This counter provides upper 32 bytes of bytes received by - * DWC_ether_qos, exclusive of preamble, in good and bad packets */ - nveu64_t mmc_rx_octetcount_g_h; - /** This counter provides the number of good - * broadcast packets received */ - nveu64_t mmc_rx_broadcastframe_g; - /** This counter provides upper 32 bytes of good - * broadcast packets received */ - nveu64_t mmc_rx_broadcastframe_g_h; - /** This counter provides the number of good - * multicast packets received */ - nveu64_t mmc_rx_multicastframe_g; - /** This counter provides upper 32 bytes of good - * multicast packets received */ - nveu64_t mmc_rx_multicastframe_g_h; - /** This counter provides the number of packets - * received with CRC error */ - nveu64_t mmc_rx_crc_error; - /** This counter provides upper 32 bytes of packets - * received with CRC error */ - nveu64_t mmc_rx_crc_error_h; - /** This counter provides the number of packets received with - * alignment (dribble) error. 
It is valid only in 10/100 mode */ - nveu64_t mmc_rx_align_error; - /** This counter provides the number of packets received with - * runt (length less than 64 bytes and CRC error) error */ - nveu64_t mmc_rx_runt_error; - /** This counter provides the number of giant packets received with - * length (including CRC) greater than 1,518 bytes (1,522 bytes for - * VLAN tagged) and with CRC error */ - nveu64_t mmc_rx_jabber_error; - /** This counter provides the number of packets received with length - * less than 64 bytes, without any errors */ - nveu64_t mmc_rx_undersize_g; - /** This counter provides the number of packets received without error, - * with length greater than the maxsize */ - nveu64_t mmc_rx_oversize_g; - /** This counter provides the number of good and bad packets received - * with length 64 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_64_octets_gb; - /** This counter provides upper 32 bytes of good and bad packets - * received with length 64 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_64_octets_gb_h; - /** This counter provides the number of good and bad packets received - * with length 65-127 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_65_to_127_octets_gb; - /** This counter provides upper 32 bytes of good and bad packets - * received with length 65-127 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_65_to_127_octets_gb_h; - /** This counter provides the number of good and bad packets received - * with length 128-255 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_128_to_255_octets_gb; - /** This counter provides upper 32 bytes of good and bad packets - * received with length 128-255 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_128_to_255_octets_gb_h; - /** This counter provides the number of good and bad packets received - * with length 256-511 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_256_to_511_octets_gb; - /** This counter provides upper 32 bytes of good and bad packets - * received 
with length 256-511 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_256_to_511_octets_gb_h; - /** This counter provides the number of good and bad packets received - * with length 512-1023 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_512_to_1023_octets_gb; - /** This counter provides upper 32 bytes of good and bad packets - * received with length 512-1023 bytes, exclusive of the preamble */ - nveu64_t mmc_rx_512_to_1023_octets_gb_h; - /** This counter provides the number of good and bad packets received - * with length 1024-maxbytes, exclusive of the preamble */ - nveu64_t mmc_rx_1024_to_max_octets_gb; - /** This counter provides upper 32 bytes of good and bad packets - * received with length 1024-maxbytes, exclusive of the preamble */ - nveu64_t mmc_rx_1024_to_max_octets_gb_h; - /** This counter provides the number of good unicast packets received */ - nveu64_t mmc_rx_unicast_g; - /** This counter provides upper 32 bytes of good unicast packets - * received */ - nveu64_t mmc_rx_unicast_g_h; - /** This counter provides the number of packets received with length - * error (Length Type field not equal to packet size), for all packets - * with valid length field */ - nveu64_t mmc_rx_length_error; - /** This counter provides upper 32 bytes of packets received with - * length error (Length Type field not equal to packet size), for all - * packets with valid length field */ - nveu64_t mmc_rx_length_error_h; - /** This counter provides the number of packets received with length - * field not equal to the valid packet size (greater than 1,500 but - * less than 1,536) */ - nveu64_t mmc_rx_outofrangetype; - /** This counter provides upper 32 bytes of packets received with - * length field not equal to the valid packet size (greater than 1,500 - * but less than 1,536) */ - nveu64_t mmc_rx_outofrangetype_h; - /** This counter provides the number of good and valid Pause packets - * received */ - nveu64_t mmc_rx_pause_frames; - /** This counter provides upper 32 
bytes of good and valid Pause packets - * received */ - nveu64_t mmc_rx_pause_frames_h; - /** This counter provides the number of missed received packets - * because of FIFO overflow in DWC_ether_qos */ - nveu64_t mmc_rx_fifo_overflow; - /** This counter provides upper 32 bytes of missed received packets - * because of FIFO overflow in DWC_ether_qos */ - nveu64_t mmc_rx_fifo_overflow_h; - /** This counter provides the number of good and bad VLAN packets - * received */ - nveu64_t mmc_rx_vlan_frames_gb; - /** This counter provides upper 32 bytes of good and bad VLAN packets - * received */ - nveu64_t mmc_rx_vlan_frames_gb_h; - /** This counter provides the number of packets received with error - * because of watchdog timeout error */ - nveu64_t mmc_rx_watchdog_error; - /** This counter provides the number of packets received with Receive - * error or Packet Extension error on the GMII or MII interface */ - nveu64_t mmc_rx_receive_error; - /** This counter provides the number of packets received with Receive - * error or Packet Extension error on the GMII or MII interface */ - nveu64_t mmc_rx_ctrl_frames_g; - /** This counter provides the number of microseconds Tx LPI is asserted - * in the MAC controller */ - nveu64_t mmc_tx_lpi_usec_cntr; - /** This counter provides the number of times MAC controller has - * entered Tx LPI. 
*/ - nveu64_t mmc_tx_lpi_tran_cntr; - /** This counter provides the number of microseconds Rx LPI is asserted - * in the MAC controller */ - nveu64_t mmc_rx_lpi_usec_cntr; - /** This counter provides the number of times MAC controller has - * entered Rx LPI.*/ - nveu64_t mmc_rx_lpi_tran_cntr; - /** This counter provides the number of good IPv4 datagrams received - * with the TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv4_gd; - /** This counter provides upper 32 bytes of good IPv4 datagrams received - * with the TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv4_gd_h; - /** RxIPv4 Header Error Packets */ - nveu64_t mmc_rx_ipv4_hderr; - /** RxIPv4 of upper 32 bytes of Header Error Packets */ - nveu64_t mmc_rx_ipv4_hderr_h; - /** This counter provides the number of IPv4 datagram packets received - * that did not have a TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv4_nopay; - /** This counter provides upper 32 bytes of IPv4 datagram packets - * received that did not have a TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv4_nopay_h; - /** This counter provides the number of good IPv4 datagrams received - * with fragmentation */ - nveu64_t mmc_rx_ipv4_frag; - /** This counter provides upper 32 bytes of good IPv4 datagrams received - * with fragmentation */ - nveu64_t mmc_rx_ipv4_frag_h; - /** This counter provides the number of good IPv4 datagrams received - * that had a UDP payload with checksum disabled */ - nveu64_t mmc_rx_ipv4_udsbl; - /** This counter provides upper 32 bytes of good IPv4 datagrams received - * that had a UDP payload with checksum disabled */ - nveu64_t mmc_rx_ipv4_udsbl_h; - /** This counter provides the number of good IPv6 datagrams received - * with the TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv6_gd_octets; - /** This counter provides upper 32 bytes of good IPv6 datagrams received - * with the TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv6_gd_octets_h; - /** This counter provides the number of IPv6 datagrams received - * 
with header (length or version mismatch) errors */ - nveu64_t mmc_rx_ipv6_hderr_octets; - /** This counter provides the number of IPv6 datagrams received - * with header (length or version mismatch) errors */ - nveu64_t mmc_rx_ipv6_hderr_octets_h; - /** This counter provides the number of IPv6 datagram packets received - * that did not have a TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv6_nopay_octets; - /** This counter provides upper 32 bytes of IPv6 datagram packets - * received that did not have a TCP, UDP, or ICMP payload */ - nveu64_t mmc_rx_ipv6_nopay_octets_h; - /* Protocols */ - /** This counter provides the number of good IP datagrams received by - * DWC_ether_qos with a good UDP payload */ - nveu64_t mmc_rx_udp_gd; - /** This counter provides upper 32 bytes of good IP datagrams received - * by DWC_ether_qos with a good UDP payload */ - nveu64_t mmc_rx_udp_gd_h; - /** This counter provides the number of good IP datagrams received by - * DWC_ether_qos with a good UDP payload. This counter is not updated - * when the RxIPv4_UDP_Checksum_Disabled_Packets counter is - * incremented */ - nveu64_t mmc_rx_udp_err; - /** This counter provides upper 32 bytes of good IP datagrams received - * by DWC_ether_qos with a good UDP payload. 
This counter is not updated - * when the RxIPv4_UDP_Checksum_Disabled_Packets counter is - * incremented */ - nveu64_t mmc_rx_udp_err_h; - /** This counter provides the number of good IP datagrams received - * with a good TCP payload */ - nveu64_t mmc_rx_tcp_gd; - /** This counter provides the number of good IP datagrams received - * with a good TCP payload */ - nveu64_t mmc_rx_tcp_gd_h; - /** This counter provides upper 32 bytes of good IP datagrams received - * with a good TCP payload */ - nveu64_t mmc_rx_tcp_err; - /** This counter provides upper 32 bytes of good IP datagrams received - * with a good TCP payload */ - nveu64_t mmc_rx_tcp_err_h; - /** This counter provides the number of good IP datagrams received - * with a good ICMP payload */ - nveu64_t mmc_rx_icmp_gd; - /** This counter provides upper 32 bytes of good IP datagrams received - * with a good ICMP payload */ - nveu64_t mmc_rx_icmp_gd_h; - /** This counter provides the number of good IP datagrams received - * whose ICMP payload has a checksum error */ - nveu64_t mmc_rx_icmp_err; - /** This counter provides upper 32 bytes of good IP datagrams received - * whose ICMP payload has a checksum error */ - nveu64_t mmc_rx_icmp_err_h; - /** This counter provides the number of bytes received by DWC_ether_qos - * in good IPv4 datagrams encapsulating TCP, UDP, or ICMP data. - * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv4_gd_octets; - /** This counter provides upper 32 bytes received by DWC_ether_qos - * in good IPv4 datagrams encapsulating TCP, UDP, or ICMP data. - * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv4_gd_octets_h; - /** This counter provides the number of bytes received in IPv4 datagram - * with header errors (checksum, length, version mismatch). The value - * in the Length field of IPv4 header is used to update this counter. 
- * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv4_hderr_octets; - /** This counter provides upper 32 bytes received in IPv4 datagram - * with header errors (checksum, length, version mismatch). The value - * in the Length field of IPv4 header is used to update this counter. - * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv4_hderr_octets_h; - /** This counter provides the number of bytes received in IPv4 datagram - * that did not have a TCP, UDP, or ICMP payload. The value in the - * Length field of IPv4 header is used to update this counter. - * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv4_nopay_octets; - /** This counter provides upper 32 bytes received in IPv4 datagram - * that did not have a TCP, UDP, or ICMP payload. The value in the - * Length field of IPv4 header is used to update this counter. - * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv4_nopay_octets_h; - /** This counter provides the number of bytes received in fragmented - * IPv4 datagrams. The value in the Length field of IPv4 header is - * used to update this counter. (Ethernet header, FCS, pad, or IP pad - * bytes are not included in this counter */ - nveu64_t mmc_rx_ipv4_frag_octets; - /** This counter provides upper 32 bytes received in fragmented - * IPv4 datagrams. The value in the Length field of IPv4 header is - * used to update this counter. (Ethernet header, FCS, pad, or IP pad - * bytes are not included in this counter */ - nveu64_t mmc_rx_ipv4_frag_octets_h; - /** This counter provides the number of bytes received in a UDP segment - * that had the UDP checksum disabled. This counter does not count IP - * Header bytes. 
(Ethernet header, FCS, pad, or IP pad bytes are not - * included in this counter */ - nveu64_t mmc_rx_ipv4_udsbl_octets; - /** This counter provides upper 32 bytes received in a UDP segment - * that had the UDP checksum disabled. This counter does not count IP - * Header bytes. (Ethernet header, FCS, pad, or IP pad bytes are not - * included in this counter */ - nveu64_t mmc_rx_ipv4_udsbl_octets_h; - /** This counter provides the number of bytes received in good IPv6 - * datagrams encapsulating TCP, UDP, or ICMP data. (Ethernet header, - * FCS, pad, or IP pad bytes are not included in this counter */ - nveu64_t mmc_rx_ipv6_gd; - /** This counter provides upper 32 bytes received in good IPv6 - * datagrams encapsulating TCP, UDP, or ICMP data. (Ethernet header, - * FCS, pad, or IP pad bytes are not included in this counter */ - nveu64_t mmc_rx_ipv6_gd_h; - /** This counter provides the number of bytes received in IPv6 datagrams - * with header errors (length, version mismatch). The value in the - * Length field of IPv6 header is used to update this counter. - * (Ethernet header, FCS, pad, or IP pad bytes are not included in - * this counter */ - nveu64_t mmc_rx_ipv6_hderr; - /** This counter provides upper 32 bytes received in IPv6 datagrams - * with header errors (length, version mismatch). The value in the - * Length field of IPv6 header is used to update this counter. - * (Ethernet header, FCS, pad, or IP pad bytes are not included in - * this counter */ - nveu64_t mmc_rx_ipv6_hderr_h; - /** This counter provides the number of bytes received in IPv6 - * datagrams that did not have a TCP, UDP, or ICMP payload. The value - * in the Length field of IPv6 header is used to update this counter. - * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv6_nopay; - /** This counter provides upper 32 bytes received in IPv6 - * datagrams that did not have a TCP, UDP, or ICMP payload. 
The value - * in the Length field of IPv6 header is used to update this counter. - * (Ethernet header, FCS, pad, or IP pad bytes are not included - * in this counter */ - nveu64_t mmc_rx_ipv6_nopay_h; - /* Protocols */ - /** This counter provides the number of bytes received in a good UDP - * segment. This counter does not count IP header bytes */ - nveu64_t mmc_rx_udp_gd_octets; - /** This counter provides upper 32 bytes received in a good UDP - * segment. This counter does not count IP header bytes */ - nveu64_t mmc_rx_udp_gd_octets_h; - /** This counter provides the number of bytes received in a UDP - * segment that had checksum errors. This counter does not count - * IP header bytes */ - nveu64_t mmc_rx_udp_err_octets; - /** This counter provides upper 32 bytes received in a UDP - * segment that had checksum errors. This counter does not count - * IP header bytes */ - nveu64_t mmc_rx_udp_err_octets_h; - /** This counter provides the number of bytes received in a good - * TCP segment. This counter does not count IP header bytes */ - nveu64_t mmc_rx_tcp_gd_octets; - /** This counter provides upper 32 bytes received in a good - * TCP segment. This counter does not count IP header bytes */ - nveu64_t mmc_rx_tcp_gd_octets_h; - /** This counter provides the number of bytes received in a TCP - * segment that had checksum errors. This counter does not count - * IP header bytes */ - nveu64_t mmc_rx_tcp_err_octets; - /** This counter provides upper 32 bytes received in a TCP - * segment that had checksum errors. This counter does not count - * IP header bytes */ - nveu64_t mmc_rx_tcp_err_octets_h; - /** This counter provides the number of bytes received in a good - * ICMP segment. This counter does not count IP header bytes */ - nveu64_t mmc_rx_icmp_gd_octets; - /** This counter provides upper 32 bytes received in a good - * ICMP segment. 
This counter does not count IP header bytes */ - nveu64_t mmc_rx_icmp_gd_octets_h; - /** This counter provides the number of bytes received in a ICMP - * segment that had checksum errors. This counter does not count - * IP header bytes */ - nveu64_t mmc_rx_icmp_err_octets; - /** This counter provides upper 32 bytes received in a ICMP - * segment that had checksum errors. This counter does not count - * IP header bytes */ - nveu64_t mmc_rx_icmp_err_octets_h; - /** This counter provides the number of additional mPackets - * transmitted due to preemption */ - unsigned long mmc_tx_fpe_frag_cnt; - /** This counter provides the count of number of times a hold - * request is given to MAC */ - unsigned long mmc_tx_fpe_hold_req_cnt; - /** This counter provides the number of MAC frames with reassembly - * errors on the Receiver, due to mismatch in the fragment - * count value */ - unsigned long mmc_rx_packet_reass_err_cnt; - /** This counter the number of received MAC frames rejected - * due to unknown SMD value and MAC frame fragments rejected due - * to arriving with an SMD-C when there was no preceding preempted - * frame */ - unsigned long mmc_rx_packet_smd_err_cnt; - /** This counter provides the number of MAC frames that were - * successfully reassembled and delivered to MAC */ - unsigned long mmc_rx_packet_asm_ok_cnt; - /** This counter provides the number of additional mPackets received - * due to preemption */ - unsigned long mmc_rx_fpe_fragment_cnt; -}; - -/** - * @brief osi_xtra_stat_counters - OSI core extra stat counters - */ -struct osi_xtra_stat_counters { - /** RX buffer unavailable irq count */ - nveu64_t rx_buf_unavail_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; - /** Transmit Process Stopped irq count */ - nveu64_t tx_proc_stopped_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; - /** Transmit Buffer Unavailable irq count */ - nveu64_t tx_buf_unavail_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; - /** Receive Process Stopped irq count */ - nveu64_t rx_proc_stopped_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; - 
/** Receive Watchdog Timeout irq count */ - nveu64_t rx_watchdog_irq_n; - /** Fatal Bus Error irq count */ - nveu64_t fatal_bus_error_irq_n; - /** rx skb allocation failure count */ - nveu64_t re_alloc_rxbuf_failed[OSI_MGBE_MAX_NUM_QUEUES]; - /** TX per channel interrupt count */ - nveu64_t tx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; - /** TX per channel SW timer callback count */ - nveu64_t tx_usecs_swtimer_n[OSI_MGBE_MAX_NUM_QUEUES]; - /** RX per channel interrupt count */ - nveu64_t rx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; - /** link connect count */ - nveu64_t link_connect_count; - /** link disconnect count */ - nveu64_t link_disconnect_count; - /** lock fail count node addition */ - nveu64_t ts_lock_add_fail; - /** lock fail count node removal */ - nveu64_t ts_lock_del_fail; -}; - #ifdef MACSEC_SUPPORT /** * @brief The structure hold macsec statistics counters diff --git a/kernel/nvethernetrm/osi/common/type.h b/kernel/nvethernetrm/include/nvethernet_type.h similarity index 92% rename from kernel/nvethernetrm/osi/common/type.h rename to kernel/nvethernetrm/include/nvethernet_type.h index d2ed7c722e..c4384de1c1 100644 --- a/kernel/nvethernetrm/osi/common/type.h +++ b/kernel/nvethernetrm/include/nvethernet_type.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -37,8 +37,6 @@ typedef unsigned int my_uint32_t; typedef int my_int32_t; /** intermediate type for unsigned short */ typedef unsigned short my_uint16_t; -/** intermediate type for short */ -typedef short my_int16_t; /** intermediate type for char */ typedef char my_int8_t; /** intermediate type for unsigned char */ @@ -55,8 +53,6 @@ typedef my_uint32_t nveu32_t; typedef my_int32_t nve32_t; /** typedef equivalent to unsigned short */ typedef my_uint16_t nveu16_t; -/** typedef equivalent to short */ -typedef my_int16_t nve16_t; /** typedef equivalent to char */ typedef my_int8_t nve8_t; /** typedef equivalent to unsigned char */ @@ -68,3 +64,4 @@ typedef my_uint64_t nveu64_t; /** @} */ #endif /* INCLUDED_TYPE_H */ + diff --git a/kernel/nvethernetrm/include/nvethernetrm_export.h b/kernel/nvethernetrm/include/nvethernetrm_export.h new file mode 100644 index 0000000000..28ee9b2044 --- /dev/null +++ b/kernel/nvethernetrm/include/nvethernetrm_export.h @@ -0,0 +1,775 @@ +/* + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef INCLUDED_NVETHERNETRM_EXPORT_H +#define INCLUDED_NVETHERNETRM_EXPORT_H + +#include + +/** + * @addtogroup Helper MACROS + * + * @brief EQOS generic helper MACROS. + * @{ + */ +#define OSI_GCL_SIZE_256 256U +#define OSI_MAX_TC_NUM 8U +/* Ethernet Address length */ +#define OSI_ETH_ALEN 6U +/** @} */ + +/** + * @addtogroup Flexible Receive Parser related information + * + * @brief Flexible Receive Parser commands, table size and other defines + * @{ + */ +/* Match data defines */ +#define OSI_FRP_MATCH_DATA_MAX 12U +/** @} */ + +/** + * @addtogroup MTL queue operation mode + * + * @brief MTL queue operation mode options + * @{ + */ +#define OSI_MTL_QUEUE_AVB 0x1U +#define OSI_MTL_QUEUE_ENABLE 0x2U +#define OSI_MTL_QUEUE_MODEMAX 0x3U +#ifndef OSI_STRIPPED_LIB +#define OSI_MTL_MAX_NUM_QUEUES 10U +#endif +/** @} */ + +/** + * @addtogroup EQOS_MTL MTL queue AVB algorithm mode + * + * @brief MTL AVB queue algorithm type + * @{ + */ +#define OSI_MTL_TXQ_AVALG_CBS 1U +#define OSI_MTL_TXQ_AVALG_SP 0U +/** @} */ + +#ifndef OSI_STRIPPED_LIB +/** + * @addtogroup Helper MACROS + * + * @brief EQOS generic helper MACROS. 
+ * @{ + */ +/* L2 DA filter mode(enable/disable) */ +#define OSI_OPER_EN_L2_DA_INV OSI_BIT(4) +#define OSI_OPER_DIS_L2_DA_INV OSI_BIT(5) +#endif /* !OSI_STRIPPED_LIB */ + +/* Ethernet Address length */ +#define OSI_ETH_ALEN 6U +#define OSI_MAX_TC_NUM 8U +/** @} */ + +#pragma pack(push, 1) +/** + * @brief FRP command structure for OSD to OSI + */ +struct osi_core_frp_cmd { + /** FRP Command type */ + nveu32_t cmd; + /** OSD FRP ID */ + nve32_t frp_id; + /** OSD match data type */ + nveu8_t match_type; + /** OSD match data */ + nveu8_t match[OSI_FRP_MATCH_DATA_MAX]; + /** OSD match data length */ + nveu8_t match_length; + /** OSD Offset */ + nveu8_t offset; + /** OSD FRP filter mode flag */ + nveu8_t filter_mode; + /** OSD FRP Link ID */ + nve32_t next_frp_id; + /** OSD DMA Channel Selection + * Bit selection of DMA channels to route the frame + * Bit[0] - DMA channel 0 + * .. + * Bit [N] - DMA channel N] */ + nveu32_t dma_sel; +}; + +/** + * @brief OSI Core avb data structure per queue. + */ +struct osi_core_avb_algorithm { + /** TX Queue/TC index */ + nveu32_t qindex; + /** CBS Algorithm enable(1) or disable(0) */ + nveu32_t algo; + /** When this bit is set, the accumulated credit parameter in the + * credit-based shaper algorithm logic is not reset to zero when + * there is positive credit and no packet to transmit in the channel. 
+ * + * Expected values are enable(1) or disable(0) */ + nveu32_t credit_control; + /** idleSlopeCredit value required for CBS + * Max value for EQOS - 0x000FFFFFU + * Max value for MGBE - 0x001FFFFFU */ + nveu32_t idle_slope; + /** sendSlopeCredit value required for CBS + * Max value for EQOS - 0x0000FFFFU + * Max value for MGBE - 0x00003FFFU */ + nveu32_t send_slope; + /** hiCredit value required for CBS + * Max value - 0x1FFFFFFFU */ + nveu32_t hi_credit; + /** lowCredit value required for CBS + * Max value - 0x1FFFFFFFU */ + nveu32_t low_credit; + /** Transmit queue operating mode + * + * 00: disable + * + * 01: avb + * + * 10: enable */ + nveu32_t oper_mode; + /** TC index + * value 0 to 7 represent 8 TC */ + nveu32_t tcindex; +}; + +/** + * @brief OSI Core EST structure + */ +struct osi_est_config { + /** enable/disable */ + nveu32_t en_dis; + /** 64 bit base time register + * if both values are 0, take ptp time to avoid BTRE + * index 0 for nsec, index 1 for sec + */ + nveu32_t btr[2]; + /** 64 bit base time offset index 0 for nsec, index 1 for sec + * 32 bits for Seconds, 32 bits for nanoseconds (max 10^9) */ + nveu32_t btr_offset[2]; + /** 40 bits cycle time register, index 0 for nsec, index 1 for sec + * 8 bits for Seconds, 32 bits for nanoseconds (max 10^9) */ + nveu32_t ctr[2]; + /** Configured Time Interval width(24 bits) + 7 bits + * extension register */ + nveu32_t ter; + /** size of the gate control list Max 256 entries + * valid value range (1-255)*/ + nveu32_t llr; + /** data array 8 bit gate op + 24 execution time + * MGBE HW support GCL depth 256 */ + nveu32_t gcl[OSI_GCL_SIZE_256]; +}; + +/** + * @brief OSI Core FPE structure + */ +struct osi_fpe_config { + /** Queue Mask 1 - preemption 0 - express + * bit representation*/ + nveu32_t tx_queue_preemption_enable; + /** RQ for all preemptable packets which are not filtered + * based on user priority or SA-DA + * Value range for EQOS 1-7 + * Value range for MGBE 1-9 */ + nveu32_t rq; +}; + +/** + * 
@brief OSI Core error stats structure + */ +struct osi_stats { + /** Constant Gate Control Error */ + nveu64_t const_gate_ctr_err; + /** Head-Of-Line Blocking due to Scheduling */ + nveu64_t head_of_line_blk_sch; + /** Per TC Schedule Error */ + nveu64_t hlbs_q[OSI_MAX_TC_NUM]; + /** Head-Of-Line Blocking due to Frame Size */ + nveu64_t head_of_line_blk_frm; + /** Per TC Frame Size Error */ + nveu64_t hlbf_q[OSI_MAX_TC_NUM]; + /** BTR Error */ + nveu64_t base_time_reg_err; + /** Switch to Software Owned List Complete */ + nveu64_t sw_own_list_complete; +#ifndef OSI_STRIPPED_LIB + /** IP Header Error */ + nveu64_t mgbe_ip_header_err; + /** Jabber time out Error */ + nveu64_t mgbe_jabber_timeout_err; + /** Payload Checksum Error */ + nveu64_t mgbe_payload_cs_err; + /** Under Flow Error */ + nveu64_t mgbe_tx_underflow_err; + /** RX buffer unavailable irq count */ + nveu64_t rx_buf_unavail_irq_n[OSI_MTL_MAX_NUM_QUEUES]; + /** Transmit Process Stopped irq count */ + nveu64_t tx_proc_stopped_irq_n[OSI_MTL_MAX_NUM_QUEUES]; + /** Transmit Buffer Unavailable irq count */ + nveu64_t tx_buf_unavail_irq_n[OSI_MTL_MAX_NUM_QUEUES]; + /** Receive Process Stopped irq count */ + nveu64_t rx_proc_stopped_irq_n[OSI_MTL_MAX_NUM_QUEUES]; + /** Receive Watchdog Timeout irq count */ + nveu64_t rx_watchdog_irq_n; + /** Fatal Bus Error irq count */ + nveu64_t fatal_bus_error_irq_n; + /** lock fail count node addition */ + nveu64_t ts_lock_add_fail; + /** lock fail count node removal */ + nveu64_t ts_lock_del_fail; +#endif +}; + +/** + * @brief osi_mmc_counters - The structure to hold RMON counter values + */ +struct osi_mmc_counters { + /** This counter provides the number of bytes transmitted, exclusive of + * preamble and retried bytes, in good and bad packets */ + nveu64_t mmc_tx_octetcount_gb; + /** This counter provides upper 32 bits of transmitted octet count */ + nveu64_t mmc_tx_octetcount_gb_h; + /** This counter provides the number of good and + * bad packets transmitted, 
exclusive of retried packets */ + nveu64_t mmc_tx_framecount_gb; + /** This counter provides upper 32 bits of transmitted good and bad + * packets count */ + nveu64_t mmc_tx_framecount_gb_h; + /** This counter provides number of good broadcast + * packets transmitted */ + nveu64_t mmc_tx_broadcastframe_g; + /** This counter provides upper 32 bits of transmitted good broadcast + * packets count */ + nveu64_t mmc_tx_broadcastframe_g_h; + /** This counter provides number of good multicast + * packets transmitted */ + nveu64_t mmc_tx_multicastframe_g; + /** This counter provides upper 32 bits of transmitted good multicast + * packet count */ + nveu64_t mmc_tx_multicastframe_g_h; + /** This counter provides the number of good and bad packets + * transmitted with length 64 bytes, exclusive of preamble and + * retried packets */ + nveu64_t mmc_tx_64_octets_gb; + /** This counter provides upper 32 bits of transmitted 64 octet size + * good and bad packets count */ + nveu64_t mmc_tx_64_octets_gb_h; + /** This counter provides the number of good and bad packets + * transmitted with length 65-127 bytes, exclusive of preamble and + * retried packets */ + nveu64_t mmc_tx_65_to_127_octets_gb; + /** Provides upper 32 bits of transmitted 65-to-127 octet size good and + * bad packets count */ + nveu64_t mmc_tx_65_to_127_octets_gb_h; + /** This counter provides the number of good and bad packets + * transmitted with length 128-255 bytes, exclusive of preamble and + * retried packets */ + nveu64_t mmc_tx_128_to_255_octets_gb; + /** This counter provides upper 32 bits of transmitted 128-to-255 + * octet size good and bad packets count */ + nveu64_t mmc_tx_128_to_255_octets_gb_h; + /** This counter provides the number of good and bad packets + * transmitted with length 256-511 bytes, exclusive of preamble and + * retried packets */ + nveu64_t mmc_tx_256_to_511_octets_gb; + /** This counter provides upper 32 bits of transmitted 256-to-511 + * octet size good and bad packets count. 
*/ + nveu64_t mmc_tx_256_to_511_octets_gb_h; + /** This counter provides the number of good and bad packets + * transmitted with length 512-1023 bytes, exclusive of preamble and + * retried packets */ + nveu64_t mmc_tx_512_to_1023_octets_gb; + /** This counter provides upper 32 bits of transmitted 512-to-1023 + * octet size good and bad packets count.*/ + nveu64_t mmc_tx_512_to_1023_octets_gb_h; + /** This counter provides the number of good and bad packets + * transmitted with length 1024-max bytes, exclusive of preamble and + * retried packets */ + nveu64_t mmc_tx_1024_to_max_octets_gb; + /** This counter provides upper 32 bits of transmitted 1024-tomaxsize + * octet size good and bad packets count. */ + nveu64_t mmc_tx_1024_to_max_octets_gb_h; + /** This counter provides the number of good and bad unicast packets */ + nveu64_t mmc_tx_unicast_gb; + /** This counter provides upper 32 bits of transmitted good bad + * unicast packets count */ + nveu64_t mmc_tx_unicast_gb_h; + /** This counter provides the number of good and bad + * multicast packets */ + nveu64_t mmc_tx_multicast_gb; + /** This counter provides upper 32 bits of transmitted good bad + * multicast packets count */ + nveu64_t mmc_tx_multicast_gb_h; + /** This counter provides the number of good and bad + * broadcast packets */ + nveu64_t mmc_tx_broadcast_gb; + /** This counter provides upper 32 bits of transmitted good bad + * broadcast packets count */ + nveu64_t mmc_tx_broadcast_gb_h; + /** This counter provides the number of abort packets due to + * underflow error */ + nveu64_t mmc_tx_underflow_error; + /** This counter provides upper 32 bits of abort packets due to + * underflow error */ + nveu64_t mmc_tx_underflow_error_h; + /** This counter provides the number of successfully transmitted + * packets after a single collision in the half-duplex mode */ + nveu64_t mmc_tx_singlecol_g; + /** This counter provides the number of successfully transmitted + * packets after a multi collision in the 
half-duplex mode */ + nveu64_t mmc_tx_multicol_g; + /** This counter provides the number of successfully transmitted + * after a deferral in the half-duplex mode */ + nveu64_t mmc_tx_deferred; + /** This counter provides the number of packets aborted because of + * late collision error */ + nveu64_t mmc_tx_latecol; + /** This counter provides the number of packets aborted because of + * excessive (16) collision errors */ + nveu64_t mmc_tx_exesscol; + /** This counter provides the number of packets aborted because of + * carrier sense error (no carrier or loss of carrier) */ + nveu64_t mmc_tx_carrier_error; + /** This counter provides the number of bytes transmitted, + * exclusive of preamble, only in good packets */ + nveu64_t mmc_tx_octetcount_g; + /** This counter provides upper 32 bytes of bytes transmitted, + * exclusive of preamble, only in good packets */ + nveu64_t mmc_tx_octetcount_g_h; + /** This counter provides the number of good packets transmitted */ + nveu64_t mmc_tx_framecount_g; + /** This counter provides upper 32 bytes of good packets transmitted */ + nveu64_t mmc_tx_framecount_g_h; + /** This counter provides the number of packets aborted because of + * excessive deferral error + * (deferred for more than two max-sized packet times) */ + nveu64_t mmc_tx_excessdef; + /** This counter provides the number of good Pause + * packets transmitted */ + nveu64_t mmc_tx_pause_frame; + /** This counter provides upper 32 bytes of good Pause + * packets transmitted */ + nveu64_t mmc_tx_pause_frame_h; + /** This counter provides the number of good VLAN packets transmitted */ + nveu64_t mmc_tx_vlan_frame_g; + /** This counter provides upper 32 bytes of good VLAN packets + * transmitted */ + nveu64_t mmc_tx_vlan_frame_g_h; + /** This counter provides the number of packets transmitted without + * errors and with length greater than the maxsize (1,518 or 1,522 bytes + * for VLAN tagged packets; 2000 bytes */ + nveu64_t mmc_tx_osize_frame_g; + /** This counter 
provides the number of good and bad packets received */ + nveu64_t mmc_rx_framecount_gb; + /** This counter provides upper 32 bytes of good and bad packets + * received */ + nveu64_t mmc_rx_framecount_gb_h; + /** This counter provides the number of bytes received by DWC_ether_qos, + * exclusive of preamble, in good and bad packets */ + nveu64_t mmc_rx_octetcount_gb; + /** This counter provides upper 32 bytes of bytes received by + * DWC_ether_qos, exclusive of preamble, in good and bad packets */ + nveu64_t mmc_rx_octetcount_gb_h; + /** This counter provides the number of bytes received by DWC_ether_qos, + * exclusive of preamble, in good and bad packets */ + nveu64_t mmc_rx_octetcount_g; + /** This counter provides upper 32 bytes of bytes received by + * DWC_ether_qos, exclusive of preamble, in good and bad packets */ + nveu64_t mmc_rx_octetcount_g_h; + /** This counter provides the number of good + * broadcast packets received */ + nveu64_t mmc_rx_broadcastframe_g; + /** This counter provides upper 32 bytes of good + * broadcast packets received */ + nveu64_t mmc_rx_broadcastframe_g_h; + /** This counter provides the number of good + * multicast packets received */ + nveu64_t mmc_rx_multicastframe_g; + /** This counter provides upper 32 bytes of good + * multicast packets received */ + nveu64_t mmc_rx_multicastframe_g_h; + /** This counter provides the number of packets + * received with CRC error */ + nveu64_t mmc_rx_crc_error; + /** This counter provides upper 32 bytes of packets + * received with CRC error */ + nveu64_t mmc_rx_crc_error_h; + /** This counter provides the number of packets received with + * alignment (dribble) error. 
It is valid only in 10/100 mode */ + nveu64_t mmc_rx_align_error; + /** This counter provides the number of packets received with + * runt (length less than 64 bytes and CRC error) error */ + nveu64_t mmc_rx_runt_error; + /** This counter provides the number of giant packets received with + * length (including CRC) greater than 1,518 bytes (1,522 bytes for + * VLAN tagged) and with CRC error */ + nveu64_t mmc_rx_jabber_error; + /** This counter provides the number of packets received with length + * less than 64 bytes, without any errors */ + nveu64_t mmc_rx_undersize_g; + /** This counter provides the number of packets received without error, + * with length greater than the maxsize */ + nveu64_t mmc_rx_oversize_g; + /** This counter provides the number of good and bad packets received + * with length 64 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_64_octets_gb; + /** This counter provides upper 32 bytes of good and bad packets + * received with length 64 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_64_octets_gb_h; + /** This counter provides the number of good and bad packets received + * with length 65-127 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_65_to_127_octets_gb; + /** This counter provides upper 32 bytes of good and bad packets + * received with length 65-127 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_65_to_127_octets_gb_h; + /** This counter provides the number of good and bad packets received + * with length 128-255 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_128_to_255_octets_gb; + /** This counter provides upper 32 bytes of good and bad packets + * received with length 128-255 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_128_to_255_octets_gb_h; + /** This counter provides the number of good and bad packets received + * with length 256-511 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_256_to_511_octets_gb; + /** This counter provides upper 32 bytes of good and bad packets + * received 
with length 256-511 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_256_to_511_octets_gb_h; + /** This counter provides the number of good and bad packets received + * with length 512-1023 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_512_to_1023_octets_gb; + /** This counter provides upper 32 bytes of good and bad packets + * received with length 512-1023 bytes, exclusive of the preamble */ + nveu64_t mmc_rx_512_to_1023_octets_gb_h; + /** This counter provides the number of good and bad packets received + * with length 1024-maxbytes, exclusive of the preamble */ + nveu64_t mmc_rx_1024_to_max_octets_gb; + /** This counter provides upper 32 bytes of good and bad packets + * received with length 1024-maxbytes, exclusive of the preamble */ + nveu64_t mmc_rx_1024_to_max_octets_gb_h; + /** This counter provides the number of good unicast packets received */ + nveu64_t mmc_rx_unicast_g; + /** This counter provides upper 32 bytes of good unicast packets + * received */ + nveu64_t mmc_rx_unicast_g_h; + /** This counter provides the number of packets received with length + * error (Length Type field not equal to packet size), for all packets + * with valid length field */ + nveu64_t mmc_rx_length_error; + /** This counter provides upper 32 bytes of packets received with + * length error (Length Type field not equal to packet size), for all + * packets with valid length field */ + nveu64_t mmc_rx_length_error_h; + /** This counter provides the number of packets received with length + * field not equal to the valid packet size (greater than 1,500 but + * less than 1,536) */ + nveu64_t mmc_rx_outofrangetype; + /** This counter provides upper 32 bytes of packets received with + * length field not equal to the valid packet size (greater than 1,500 + * but less than 1,536) */ + nveu64_t mmc_rx_outofrangetype_h; + /** This counter provides the number of good and valid Pause packets + * received */ + nveu64_t mmc_rx_pause_frames; + /** This counter provides upper 32 
bytes of good and valid Pause packets + * received */ + nveu64_t mmc_rx_pause_frames_h; + /** This counter provides the number of missed received packets + * because of FIFO overflow in DWC_ether_qos */ + nveu64_t mmc_rx_fifo_overflow; + /** This counter provides upper 32 bytes of missed received packets + * because of FIFO overflow in DWC_ether_qos */ + nveu64_t mmc_rx_fifo_overflow_h; + /** This counter provides the number of good and bad VLAN packets + * received */ + nveu64_t mmc_rx_vlan_frames_gb; + /** This counter provides upper 32 bytes of good and bad VLAN packets + * received */ + nveu64_t mmc_rx_vlan_frames_gb_h; + /** This counter provides the number of packets received with error + * because of watchdog timeout error */ + nveu64_t mmc_rx_watchdog_error; + /** This counter provides the number of packets received with Receive + * error or Packet Extension error on the GMII or MII interface */ + nveu64_t mmc_rx_receive_error; + /** This counter provides the number of packets received with Receive + * error or Packet Extension error on the GMII or MII interface */ + nveu64_t mmc_rx_ctrl_frames_g; + /** This counter provides the number of microseconds Tx LPI is asserted + * in the MAC controller */ + nveu64_t mmc_tx_lpi_usec_cntr; + /** This counter provides the number of times MAC controller has + * entered Tx LPI. 
*/ + nveu64_t mmc_tx_lpi_tran_cntr; + /** This counter provides the number of microseconds Rx LPI is asserted + * in the MAC controller */ + nveu64_t mmc_rx_lpi_usec_cntr; + /** This counter provides the number of times MAC controller has + * entered Rx LPI.*/ + nveu64_t mmc_rx_lpi_tran_cntr; + /** This counter provides the number of good IPv4 datagrams received + * with the TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv4_gd; + /** This counter provides upper 32 bytes of good IPv4 datagrams received + * with the TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv4_gd_h; + /** RxIPv4 Header Error Packets */ + nveu64_t mmc_rx_ipv4_hderr; + /** RxIPv4 of upper 32 bytes of Header Error Packets */ + nveu64_t mmc_rx_ipv4_hderr_h; + /** This counter provides the number of IPv4 datagram packets received + * that did not have a TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv4_nopay; + /** This counter provides upper 32 bytes of IPv4 datagram packets + * received that did not have a TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv4_nopay_h; + /** This counter provides the number of good IPv4 datagrams received + * with fragmentation */ + nveu64_t mmc_rx_ipv4_frag; + /** This counter provides upper 32 bytes of good IPv4 datagrams received + * with fragmentation */ + nveu64_t mmc_rx_ipv4_frag_h; + /** This counter provides the number of good IPv4 datagrams received + * that had a UDP payload with checksum disabled */ + nveu64_t mmc_rx_ipv4_udsbl; + /** This counter provides upper 32 bytes of good IPv4 datagrams received + * that had a UDP payload with checksum disabled */ + nveu64_t mmc_rx_ipv4_udsbl_h; + /** This counter provides the number of good IPv6 datagrams received + * with the TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv6_gd_octets; + /** This counter provides upper 32 bytes of good IPv6 datagrams received + * with the TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv6_gd_octets_h; + /** This counter provides the number of IPv6 datagrams received + * 
with header (length or version mismatch) errors */ + nveu64_t mmc_rx_ipv6_hderr_octets; + /** This counter provides the number of IPv6 datagrams received + * with header (length or version mismatch) errors */ + nveu64_t mmc_rx_ipv6_hderr_octets_h; + /** This counter provides the number of IPv6 datagram packets received + * that did not have a TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv6_nopay_octets; + /** This counter provides upper 32 bytes of IPv6 datagram packets + * received that did not have a TCP, UDP, or ICMP payload */ + nveu64_t mmc_rx_ipv6_nopay_octets_h; + /* Protocols */ + /** This counter provides the number of good IP datagrams received by + * DWC_ether_qos with a good UDP payload */ + nveu64_t mmc_rx_udp_gd; + /** This counter provides upper 32 bytes of good IP datagrams received + * by DWC_ether_qos with a good UDP payload */ + nveu64_t mmc_rx_udp_gd_h; + /** This counter provides the number of good IP datagrams received by + * DWC_ether_qos with a good UDP payload. This counter is not updated + * when the RxIPv4_UDP_Checksum_Disabled_Packets counter is + * incremented */ + nveu64_t mmc_rx_udp_err; + /** This counter provides upper 32 bytes of good IP datagrams received + * by DWC_ether_qos with a good UDP payload. 
This counter is not updated + * when the RxIPv4_UDP_Checksum_Disabled_Packets counter is + * incremented */ + nveu64_t mmc_rx_udp_err_h; + /** This counter provides the number of good IP datagrams received + * with a good TCP payload */ + nveu64_t mmc_rx_tcp_gd; + /** This counter provides upper 32 bytes of good IP datagrams received + * with a good TCP payload */ + nveu64_t mmc_rx_tcp_gd_h; + /** This counter provides the number of good IP datagrams received + * whose TCP payload has a checksum error */ + nveu64_t mmc_rx_tcp_err; + /** This counter provides upper 32 bytes of good IP datagrams received + * whose TCP payload has a checksum error */ + nveu64_t mmc_rx_tcp_err_h; + /** This counter provides the number of good IP datagrams received + * with a good ICMP payload */ + nveu64_t mmc_rx_icmp_gd; + /** This counter provides upper 32 bytes of good IP datagrams received + * with a good ICMP payload */ + nveu64_t mmc_rx_icmp_gd_h; + /** This counter provides the number of good IP datagrams received + * whose ICMP payload has a checksum error */ + nveu64_t mmc_rx_icmp_err; + /** This counter provides upper 32 bytes of good IP datagrams received + * whose ICMP payload has a checksum error */ + nveu64_t mmc_rx_icmp_err_h; + /** This counter provides the number of bytes received by DWC_ether_qos + * in good IPv4 datagrams encapsulating TCP, UDP, or ICMP data. + * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv4_gd_octets; + /** This counter provides upper 32 bytes received by DWC_ether_qos + * in good IPv4 datagrams encapsulating TCP, UDP, or ICMP data. + * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv4_gd_octets_h; + /** This counter provides the number of bytes received in IPv4 datagram + * with header errors (checksum, length, version mismatch). The value + * in the Length field of IPv4 header is used to update this counter. 
+ * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv4_hderr_octets; + /** This counter provides upper 32 bytes received in IPv4 datagram + * with header errors (checksum, length, version mismatch). The value + * in the Length field of IPv4 header is used to update this counter. + * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv4_hderr_octets_h; + /** This counter provides the number of bytes received in IPv4 datagram + * that did not have a TCP, UDP, or ICMP payload. The value in the + * Length field of IPv4 header is used to update this counter. + * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv4_nopay_octets; + /** This counter provides upper 32 bytes received in IPv4 datagram + * that did not have a TCP, UDP, or ICMP payload. The value in the + * Length field of IPv4 header is used to update this counter. + * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv4_nopay_octets_h; + /** This counter provides the number of bytes received in fragmented + * IPv4 datagrams. The value in the Length field of IPv4 header is + * used to update this counter. (Ethernet header, FCS, pad, or IP pad + * bytes are not included in this counter */ + nveu64_t mmc_rx_ipv4_frag_octets; + /** This counter provides upper 32 bytes received in fragmented + * IPv4 datagrams. The value in the Length field of IPv4 header is + * used to update this counter. (Ethernet header, FCS, pad, or IP pad + * bytes are not included in this counter */ + nveu64_t mmc_rx_ipv4_frag_octets_h; + /** This counter provides the number of bytes received in a UDP segment + * that had the UDP checksum disabled. This counter does not count IP + * Header bytes. 
(Ethernet header, FCS, pad, or IP pad bytes are not + * included in this counter */ + nveu64_t mmc_rx_ipv4_udsbl_octets; + /** This counter provides upper 32 bytes received in a UDP segment + * that had the UDP checksum disabled. This counter does not count IP + * Header bytes. (Ethernet header, FCS, pad, or IP pad bytes are not + * included in this counter */ + nveu64_t mmc_rx_ipv4_udsbl_octets_h; + /** This counter provides the number of bytes received in good IPv6 + * datagrams encapsulating TCP, UDP, or ICMP data. (Ethernet header, + * FCS, pad, or IP pad bytes are not included in this counter */ + nveu64_t mmc_rx_ipv6_gd; + /** This counter provides upper 32 bytes received in good IPv6 + * datagrams encapsulating TCP, UDP, or ICMP data. (Ethernet header, + * FCS, pad, or IP pad bytes are not included in this counter */ + nveu64_t mmc_rx_ipv6_gd_h; + /** This counter provides the number of bytes received in IPv6 datagrams + * with header errors (length, version mismatch). The value in the + * Length field of IPv6 header is used to update this counter. + * (Ethernet header, FCS, pad, or IP pad bytes are not included in + * this counter */ + nveu64_t mmc_rx_ipv6_hderr; + /** This counter provides upper 32 bytes received in IPv6 datagrams + * with header errors (length, version mismatch). The value in the + * Length field of IPv6 header is used to update this counter. + * (Ethernet header, FCS, pad, or IP pad bytes are not included in + * this counter */ + nveu64_t mmc_rx_ipv6_hderr_h; + /** This counter provides the number of bytes received in IPv6 + * datagrams that did not have a TCP, UDP, or ICMP payload. The value + * in the Length field of IPv6 header is used to update this counter. + * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv6_nopay; + /** This counter provides upper 32 bytes received in IPv6 + * datagrams that did not have a TCP, UDP, or ICMP payload. 
The value + * in the Length field of IPv6 header is used to update this counter. + * (Ethernet header, FCS, pad, or IP pad bytes are not included + * in this counter */ + nveu64_t mmc_rx_ipv6_nopay_h; + /* Protocols */ + /** This counter provides the number of bytes received in a good UDP + * segment. This counter does not count IP header bytes */ + nveu64_t mmc_rx_udp_gd_octets; + /** This counter provides upper 32 bytes received in a good UDP + * segment. This counter does not count IP header bytes */ + nveu64_t mmc_rx_udp_gd_octets_h; + /** This counter provides the number of bytes received in a UDP + * segment that had checksum errors. This counter does not count + * IP header bytes */ + nveu64_t mmc_rx_udp_err_octets; + /** This counter provides upper 32 bytes received in a UDP + * segment that had checksum errors. This counter does not count + * IP header bytes */ + nveu64_t mmc_rx_udp_err_octets_h; + /** This counter provides the number of bytes received in a good + * TCP segment. This counter does not count IP header bytes */ + nveu64_t mmc_rx_tcp_gd_octets; + /** This counter provides upper 32 bytes received in a good + * TCP segment. This counter does not count IP header bytes */ + nveu64_t mmc_rx_tcp_gd_octets_h; + /** This counter provides the number of bytes received in a TCP + * segment that had checksum errors. This counter does not count + * IP header bytes */ + nveu64_t mmc_rx_tcp_err_octets; + /** This counter provides upper 32 bytes received in a TCP + * segment that had checksum errors. This counter does not count + * IP header bytes */ + nveu64_t mmc_rx_tcp_err_octets_h; + /** This counter provides the number of bytes received in a good + * ICMP segment. This counter does not count IP header bytes */ + nveu64_t mmc_rx_icmp_gd_octets; + /** This counter provides upper 32 bytes received in a good + * ICMP segment. 
This counter does not count IP header bytes */ + nveu64_t mmc_rx_icmp_gd_octets_h; + /** This counter provides the number of bytes received in a ICMP + * segment that had checksum errors. This counter does not count + * IP header bytes */ + nveu64_t mmc_rx_icmp_err_octets; + /** This counter provides upper 32 bytes received in a ICMP + * segment that had checksum errors. This counter does not count + * IP header bytes */ + nveu64_t mmc_rx_icmp_err_octets_h; + /** This counter provides the number of additional mPackets + * transmitted due to preemption */ + nveu64_t mmc_tx_fpe_frag_cnt; + /** This counter provides the count of number of times a hold + * request is given to MAC */ + nveu64_t mmc_tx_fpe_hold_req_cnt; + /** This counter provides the number of MAC frames with reassembly + * errors on the Receiver, due to mismatch in the fragment + * count value */ + nveu64_t mmc_rx_packet_reass_err_cnt; + /** This counter provides the number of received MAC frames rejected + * due to unknown SMD value and MAC frame fragments rejected due + * to arriving with an SMD-C when there was no preceding preempted + * frame */ + nveu64_t mmc_rx_packet_smd_err_cnt; + /** This counter provides the number of MAC frames that were + * successfully reassembled and delivered to MAC */ + nveu64_t mmc_rx_packet_asm_ok_cnt; + /** This counter provides the number of additional mPackets received + * due to preemption */ + nveu64_t mmc_rx_fpe_fragment_cnt; +}; + +#pragma pack(pop) +#endif /* INCLUDED_NVETHERNETRM_EXPORT_H */ diff --git a/kernel/nvethernetrm/include/nvethernetrm_l3l4.h b/kernel/nvethernetrm/include/nvethernetrm_l3l4.h new file mode 100644 index 0000000000..94649a9268 --- /dev/null +++ b/kernel/nvethernetrm/include/nvethernetrm_l3l4.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef INCLUDED_NVETHERNETRM_L3L4_H +#define INCLUDED_NVETHERNETRM_L3L4_H + +#include + +/** helper macro for enable */ +#define OSI_TRUE ((nveu32_t)1U) + +/** helper macro to disable */ +#define OSI_FALSE ((nveu32_t)0U) + +/** + * @brief L3/L4 filter function dependent parameter + */ +struct osi_l3_l4_filter { + /** filter data */ + struct { +#ifndef OSI_STRIPPED_LIB + /** udp (OSI_TRUE) or tcp (OSI_FALSE) */ + nveu32_t is_udp; + /** ipv6 (OSI_TRUE) or ipv4 (OSI_FALSE) */ + nveu32_t is_ipv6; +#endif /* !OSI_STRIPPED_LIB */ + /** destination ip address information */ + struct { + /** ipv4 address */ + nveu8_t ip4_addr[4]; +#ifndef OSI_STRIPPED_LIB + /** ipv6 address */ + nveu16_t ip6_addr[8]; + /** Port number */ + nveu16_t port_no; + /** addr match enable (OSI_TRUE) or disable (OSI_FALSE) */ + nveu32_t addr_match; + /** perfect(OSI_FALSE) or inverse(OSI_TRUE) match for address */ + nveu32_t addr_match_inv; + /** port match enable (OSI_TRUE) or disable (OSI_FALSE) */ + nveu32_t port_match; + /** perfect(OSI_FALSE) or inverse(OSI_TRUE) match for port */ + nveu32_t port_match_inv; +#endif /* !OSI_STRIPPED_LIB */ + } dst; +#ifndef OSI_STRIPPED_LIB + /** ip address and port information */ + struct { + /** ipv4 address */ + nveu8_t ip4_addr[4]; + /** ipv6 address */ + nveu16_t ip6_addr[8]; + /** Port number */ + nveu16_t port_no; + /** addr match enable (OSI_TRUE) or disable (OSI_FALSE) */ + nveu32_t addr_match; + /** perfect(OSI_FALSE) or inverse(OSI_TRUE) match for address */ + nveu32_t addr_match_inv; + /** port match enable (OSI_TRUE) or disable (OSI_FALSE) */ + nveu32_t port_match; + /** perfect(OSI_FALSE) or inverse(OSI_TRUE) match for port */ + nveu32_t port_match_inv; + } src; +#endif /* !OSI_STRIPPED_LIB */ + } data; +#ifndef OSI_STRIPPED_LIB + /** Represents whether DMA routing enabled (OSI_TRUE) or not (OSI_FALSE) */ + nveu32_t dma_routing_enable; +#endif /* !OSI_STRIPPED_LIB */ + /** DMA channel number of routing enabled */ + nveu32_t dma_chan; + 
/** filter enable (OSI_TRUE) or disable (OSI_FALSE) */ + nveu32_t filter_enb_dis; +}; + +#endif /* INCLUDED_NVETHERNETRM_L3L4_H */ diff --git a/kernel/nvethernetrm/include/osi_common.h b/kernel/nvethernetrm/include/osi_common.h index 0c3a75fe1a..14050eb1d2 100644 --- a/kernel/nvethernetrm/include/osi_common.h +++ b/kernel/nvethernetrm/include/osi_common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,7 +23,7 @@ #ifndef INCLUDED_OSI_COMMON_H #define INCLUDED_OSI_COMMON_H -#include "../osi/common/type.h" +#include /** * @addtogroup FC Flow Control Threshold Macros @@ -32,22 +32,9 @@ * the flow control is asserted or de-asserted * @{ */ -#define FULL_MINUS_1_5K (unsigned int)1 -#define FULL_MINUS_2_K (unsigned int)2 -#define FULL_MINUS_2_5K (unsigned int)3 -#define FULL_MINUS_3_K (unsigned int)4 -#define FULL_MINUS_4_K (unsigned int)6 -#define FULL_MINUS_6_K (unsigned int)10 -#define FULL_MINUS_10_K (unsigned int)18 -#define FULL_MINUS_13_K (unsigned int)24 -#define FULL_MINUS_14_K (unsigned int)26 -#define FULL_MINUS_16_K (unsigned int)30 -#define FULL_MINUS_18_K (unsigned int)34 -#define FULL_MINUS_21_K (unsigned int)40 -#define FULL_MINUS_24_K (unsigned int)46 -#define FULL_MINUS_29_K (unsigned int)56 -#define FULL_MINUS_31_K (unsigned int)60 -#define FULL_MINUS_32_K (unsigned int)62 +#define FULL_MINUS_1_5K ((nveu32_t)1) +#define FULL_MINUS_16_K ((nveu32_t)30) +#define FULL_MINUS_32_K ((nveu32_t)62) /** @} */ /** @@ -66,13 +53,46 @@ #define OSI_MAX_TX_COALESCE_USEC 1020U #define OSI_MIN_TX_COALESCE_USEC 32U #define OSI_MIN_TX_COALESCE_FRAMES 1U +#define OSI_PAUSE_FRAMES_DISABLE 0U +#define OSI_PAUSE_FRAMES_ENABLE 1U #endif /* !OSI_STRIPPED_LIB */ /* Compiler hints for branch prediction */ #define 
osi_unlikely(x) __builtin_expect(!!(x), 0) /** @} */ +/** + * @addtogroup Helper MACROS + * + * @brief EQOS generic helper MACROS. + * @{ + */ +#define OSI_MAX_24BITS 0xFFFFFFU +#define OSI_MAX_28BITS 0xFFFFFFFU +#define OSI_MAX_32BITS 0xFFFFFFFFU +#define OSI_MASK_16BITS 0xFFFFU +#define OSI_MASK_20BITS 0xFFFFFU +#define OSI_MASK_24BITS 0xFFFFFFU +#define OSI_GCL_SIZE_64 64U +#define OSI_GCL_SIZE_128 128U +#define OSI_GCL_SIZE_512 512U +#define OSI_GCL_SIZE_1024 1024U +/** @} */ + #ifndef OSI_STRIPPED_LIB +/** + * @addtogroup Helper MACROS + * + * @brief EQOS generic helper MACROS. + * @{ + */ +#define OSI_PTP_REQ_CLK_FREQ 250000000U +#define OSI_FLOW_CTRL_DISABLE 0U +#define OSI_ADDRESS_32BIT 0 +#define OSI_ADDRESS_40BIT 1 +#define OSI_ADDRESS_48BIT 2 +/** @ } */ + /** * @addtogroup - LPI-Timers LPI configuration macros * @@ -120,47 +140,22 @@ /** @} */ #endif /* !OSI_STRIPPED_LIB */ -/** - * @addtogroup Helper Helper MACROS - * - * @brief EQOS generic helper MACROS. - * @{ - */ -#ifndef OSI_STRIPPED_LIB -#define OSI_PAUSE_FRAMES_ENABLE 0U -#define OSI_PTP_REQ_CLK_FREQ 250000000U -#define OSI_FLOW_CTRL_DISABLE 0U -#define OSI_MAX_24BITS 0xFFFFFFU -#define OSI_MAX_28BITS 0xFFFFFFFU -#define OSI_MAX_32BITS 0xFFFFFFFFU -#define OSI_MASK_16BITS 0xFFFFU -#define OSI_MASK_20BITS 0xFFFFFU -#define OSI_MASK_24BITS 0xFFFFFFU -#define OSI_GCL_SIZE_64 64U -#define OSI_GCL_SIZE_128 128U -#define OSI_GCL_SIZE_256 256U -#define OSI_GCL_SIZE_512 512U -#define OSI_GCL_SIZE_1024 1024U - #define OSI_POLL_COUNT 1000U - -#define OSI_ADDRESS_32BIT 0 -#define OSI_ADDRESS_40BIT 1 -#define OSI_ADDRESS_48BIT 2 -#endif /* !OSI_STRIPPED_LIB */ - #ifndef UINT_MAX #define UINT_MAX (~0U) #endif #ifndef INT_MAX #define INT_MAX (0x7FFFFFFF) +#ifndef OSI_LLONG_MAX +#define OSI_LLONG_MAX (0x7FFFFFFFFFFFFFFF) +#endif #endif /** @} */ /** - * @addtogroup Helper Helper MACROS + * @addtogroup Generic helper MACROS * - * @brief EQOS generic helper MACROS. 
+ * @brief These are Generic helper macros used at various places. * @{ */ #define OSI_UCHAR_MAX (0xFFU) @@ -168,22 +163,24 @@ /* Logging defines */ /* log levels */ -#define OSI_LOG_INFO 1U +#define OSI_LOG_INFO 1U +#ifndef OSI_STRIPPED_LIB #define OSI_LOG_WARN 2U +#endif /* OSI_STRIPPED_LIB */ #define OSI_LOG_ERR 3U /* Error types */ #define OSI_LOG_ARG_OUTOFBOUND 1U #define OSI_LOG_ARG_INVALID 2U #define OSI_LOG_ARG_HW_FAIL 4U -#define OSI_LOG_WARN 2U -#ifndef OSI_STRIPPED_LIB #define OSI_LOG_ARG_OPNOTSUPP 3U -#endif /* !OSI_STRIPPED_LIB */ /* Default maximum Giant Packet Size Limit is 16K */ #define OSI_MAX_MTU_SIZE 16383U + +#ifdef UPDATED_PAD_CAL /* MAC Tx/Rx Idle retry and delay count */ #define OSI_TXRX_IDLE_RETRY 5000U #define OSI_DELAY_COUNT 10U +#endif #define EQOS_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x1160U) #define MGBE_DMA_CHX_STATUS(x) ((0x0080U * (x)) + 0x3160U) @@ -200,15 +197,16 @@ /* MACSEC max SC's supported 16*/ #define OSI_MACSEC_SC_INDEX_MAX 16 +#ifndef OSI_STRIPPED_LIB /* HW supports 8 Hash table regs, but eqos_validate_core_regs only checks 4 */ #define OSI_EQOS_MAX_HASH_REGS 4U +#endif /* OSI_STRIPPED_LIB */ #define MAC_VERSION 0x110 #define MAC_VERSION_SNVER_MASK 0x7FU #define OSI_MAC_HW_EQOS 0U #define OSI_MAC_HW_MGBE 1U -#define OSI_ETH_ALEN 6U #define OSI_MAX_VM_IRQS 5U #define OSI_NULL ((void *)0) @@ -216,37 +214,30 @@ #define OSI_NONE 0U #define OSI_NONE_SIGNED 0 #define OSI_DISABLE 0U +#define OSI_H_DISABLE 0x10101010U +#define OSI_H_ENABLE (~OSI_H_DISABLE) #define OSI_BIT(nr) ((nveu32_t)1 << (nr)) +#ifndef OSI_STRIPPED_LIB +#define OSI_MGBE_MAC_3_00 0x30U #define OSI_EQOS_MAC_4_10 0x41U -#define OSI_EQOS_MAC_5_00 0x50U #define OSI_EQOS_MAC_5_10 0x51U +#define OSI_MGBE_MAC_4_00 0x40U +#endif /* OSI_STRIPPED_LIB */ + +#define OSI_EQOS_MAC_5_00 0x50U #define OSI_EQOS_MAC_5_30 0x53U -#define OSI_MGBE_MAC_3_00 0x30U #define OSI_MGBE_MAC_3_10 0x31U -#define OSI_MGBE_MAC_4_00 0x40U #define OSI_MAX_VM_IRQS 5U -#define OSI_IP4_FILTER 0U 
-#define OSI_IP6_FILTER 1U #ifndef OSI_STRIPPED_LIB -#define OSI_L2_FILTER_INDEX_ANY 127U #define OSI_HASH_FILTER_MODE 1U #define OSI_L4_FILTER_TCP 0U #define OSI_L4_FILTER_UDP 1U #define OSI_PERFECT_FILTER_MODE 0U -#define NV_ETH_FCS_LEN 0x4U -#define NV_ETH_FRAME_LEN 1514U - -#define MAX_ETH_FRAME_LEN_DEFAULT \ - (NV_ETH_FRAME_LEN + NV_ETH_FCS_LEN + NV_VLAN_HLEN) -#define OSI_MTU_SIZE_16K 16000U -#define OSI_MTU_SIZE_8K 8000U -#define OSI_MTU_SIZE_4K 4000U -#define OSI_MTU_SIZE_2K 2000U #define OSI_INVALID_CHAN_NUM 0xFFU #endif /* OSI_STRIPPED_LIB */ /** @} */ @@ -262,30 +253,7 @@ #define OSI_DEBUG_TYPE_REG 2U #define OSI_DEBUG_TYPE_STRUCTS 3U #endif /* OSI_DEBUG */ - -#ifndef OSI_STRIPPED_LIB -/** - * @addtogroup MTL queue operation mode - * - * @brief MTL queue operation mode options - * @{ - */ -#define OSI_MTL_QUEUE_DISABLED 0x0U -#define OSI_MTL_QUEUE_AVB 0x1U -#define OSI_MTL_QUEUE_ENABLE 0x2U -#define OSI_MTL_QUEUE_MODEMAX 0x3U -/** @} */ - -/** - * @addtogroup EQOS_MTL MTL queue AVB algorithm mode - * - * @brief MTL AVB queue algorithm type - * @{ - */ -#define OSI_MTL_TXQ_AVALG_CBS 1U -#define OSI_MTL_TXQ_AVALG_SP 0U /** @} */ -#endif /* OSI_STRIPPED_LIB */ /** * @brief unused function attribute @@ -320,7 +288,7 @@ static inline nveu64_t osi_update_stats_counter(nveu64_t last_value, if (temp < last_value) { /* Stats overflow, so reset it to zero */ - return 0UL; + temp = 0UL; } return temp; diff --git a/kernel/nvethernetrm/include/osi_core.h b/kernel/nvethernetrm/include/osi_core.h index 2c146d0983..97e13b7349 100644 --- a/kernel/nvethernetrm/include/osi_core.h +++ b/kernel/nvethernetrm/include/osi_core.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,6 +23,8 @@ #ifndef INCLUDED_OSI_CORE_H #define INCLUDED_OSI_CORE_H +#include "nvethernetrm_export.h" +#include "nvethernetrm_l3l4.h" #include #include "mmc.h" @@ -36,18 +38,79 @@ struct ivc_msg_common; /* Following added to avoid misraC 4.6 * Here we are defining intermediate type */ -/** intermediate type for unsigned short */ -typedef unsigned short my_uint16_t; /** intermediate type for long long */ typedef long long my_lint_64; -/* Actual type used in code */ -/** typedef equivalent to unsigned short */ -typedef my_uint16_t nveu16_t; /** typedef equivalent to long long */ typedef my_lint_64 nvel64_t; /** @} */ +#ifndef OSI_STRIPPED_LIB +#define OSI_OPER_EN_L2_DA_INV OSI_BIT(4) +#define OSI_OPER_DIS_L2_DA_INV OSI_BIT(5) +#define OSI_PTP_SNAP_TRANSPORT 1U +#define OSI_VLAN_ACTION_DEL 0x0U +#define OSI_VLAN_ACTION_ADD OSI_BIT(31) +#define OSI_RXQ_ROUTE_PTP 0U +#define EQOS_MAX_HTR_REGS 8U + +/** + * @addtogroup RSS related information + * + * @brief RSS hash key and table size. 
+ * @{ + */ +#define OSI_RSS_HASH_KEY_SIZE 40U +#define OSI_RSS_MAX_TABLE_SIZE 128U +/** @} */ + +#define OSI_CMD_RESET_MMC 12U +#define OSI_CMD_MDC_CONFIG 1U +#define OSI_CMD_MAC_LB 14U +#define OSI_CMD_FLOW_CTRL 15U +#define OSI_CMD_CONFIG_TXSTATUS 27U +#define OSI_CMD_CONFIG_RX_CRC_CHECK 25U +#define OSI_CMD_CONFIG_EEE 32U +#define OSI_CMD_ARP_OFFLOAD 30U +#define OSI_CMD_UPDATE_VLAN_ID 26U +#define OSI_CMD_VLAN_FILTER 31U +#define OSI_CMD_CONFIG_PTP_OFFLOAD 34U +#define OSI_CMD_PTP_RXQ_ROUTE 35U +#define OSI_CMD_CONFIG_RSS 37U +#define OSI_CMD_CONFIG_FW_ERR 29U +#define OSI_CMD_SET_MODE 16U +#define OSI_CMD_POLL_FOR_MAC_RST 4U +#define OSI_CMD_GET_MAC_VER 10U + +/** + * @addtogroup PTP-offload PTP offload defines + * @{ + */ +#define OSI_PTP_MAX_PORTID 0xFFFFU +#define OSI_PTP_MAX_DOMAIN 0xFFU +#define OSI_PTP_SNAP_ORDINARY 0U +#define OSI_PTP_SNAP_P2P 3U +/** @} */ + +#define OSI_MAC_TCR_TSMASTERENA OSI_BIT(15) +#define OSI_MAC_TCR_TSEVENTENA OSI_BIT(14) +#define OSI_MAC_TCR_TSENALL OSI_BIT(8) +#define OSI_MAC_TCR_SNAPTYPSEL_3 (OSI_BIT(16) | OSI_BIT(17)) +#define OSI_MAC_TCR_SNAPTYPSEL_2 OSI_BIT(17) +#define OSI_MAC_TCR_CSC OSI_BIT(19) +#define OSI_MAC_TCR_AV8021ASMEN OSI_BIT(28) + +#define OSI_FLOW_CTRL_RX OSI_BIT(1) + +#define OSI_INSTANCE_ID_MBGE0 0 +#define OSI_INSTANCE_ID_MGBE1 1 +#define OSI_INSTANCE_ID_MGBE2 2 +#define OSI_INSTANCE_ID_MGBE3 3 +#define OSI_INSTANCE_ID_EQOS 4 + +#endif /* !OSI_STRIPPED_LIB */ + + #ifdef MACSEC_SUPPORT /** * @addtogroup MACSEC related helper MACROs @@ -63,16 +126,6 @@ typedef my_lint_64 nvel64_t; /** @} */ #endif /* MACSEC_SUPPORT */ -/** - * @addtogroup PTP PTP related information - * - * @brief PTP SSINC values - * @{ - */ -#define OSI_PTP_SSINC_16 16U -#define OSI_PTP_SSINC_4 4U -/** @} */ - /** * @addtogroup PTP PTP related information * @@ -83,6 +136,7 @@ typedef my_lint_64 nvel64_t; #define OSI_PTP_M2M_SECONDARY 2U /** @} */ + /** * @addtogroup EQOS_PTP PTP Helper MACROS * @@ -91,55 +145,47 @@ typedef my_lint_64 
nvel64_t; */ #define OSI_MAC_TCR_TSENA OSI_BIT(0) #define OSI_MAC_TCR_TSCFUPDT OSI_BIT(1) -#define OSI_MAC_TCR_TSENALL OSI_BIT(8) #define OSI_MAC_TCR_TSCTRLSSR OSI_BIT(9) #define OSI_MAC_TCR_TSVER2ENA OSI_BIT(10) #define OSI_MAC_TCR_TSIPENA OSI_BIT(11) #define OSI_MAC_TCR_TSIPV6ENA OSI_BIT(12) #define OSI_MAC_TCR_TSIPV4ENA OSI_BIT(13) -#define OSI_MAC_TCR_TSEVENTENA OSI_BIT(14) -#define OSI_MAC_TCR_TSMASTERENA OSI_BIT(15) #define OSI_MAC_TCR_SNAPTYPSEL_1 OSI_BIT(16) -#define OSI_MAC_TCR_SNAPTYPSEL_2 OSI_BIT(17) -#define OSI_MAC_TCR_CSC OSI_BIT(19) -#define OSI_MAC_TCR_AV8021ASMEN OSI_BIT(28) -#define OSI_MAC_TCR_SNAPTYPSEL_3 (OSI_BIT(16) | OSI_BIT(17)) #define OSI_MAC_TCR_TXTSSMIS OSI_BIT(31) /** @} */ /** - * @addtogroup Helper Helper MACROS + * @addtogroup Helper MACROS * * @brief EQOS generic helper MACROS. * @{ */ #define EQOS_DMA_CHX_IER(x) ((0x0080U * (x)) + 0x1134U) #define EQOS_MAX_MAC_ADDRESS_FILTER 128U +#define EQOS_MAX_MAC_5_3_ADDRESS_FILTER 32U #define EQOS_MAX_L3_L4_FILTER 8U -#define EQOS_MAX_HTR_REGS 8U #define OSI_MGBE_MAX_MAC_ADDRESS_FILTER 32U #define OSI_DA_MATCH 0U +#ifndef OSI_STRIPPED_LIB #define OSI_INV_MATCH 1U +#endif /* !OSI_STRIPPED_LIB */ #define OSI_AMASK_DISABLE 0U #define OSI_CHAN_ANY 0xFFU -#define OSI_MAX_TC_NUM 8U #define OSI_DFLT_MTU_SIZE 1500U #define OSI_MTU_SIZE_9000 9000U +/* Reg ETHER_QOS_AUTO_CAL_CONFIG_0[AUTO_CAL_PD/PU_OFFSET] max value */ +#define OSI_PAD_CAL_CONFIG_PD_PU_OFFSET_MAX 0x1FU + +#ifndef OSI_STRIPPED_LIB /* HW supports 8 Hash table regs, but eqos_validate_core_regs only checks 4 */ #define OSI_EQOS_MAX_HASH_REGS 4U -#define OSI_ETH_ALEN 6U +#endif /* !OSI_STRIPPED_LIB */ #define OSI_FLOW_CTRL_TX OSI_BIT(0) -#define OSI_FLOW_CTRL_RX OSI_BIT(1) #define OSI_FULL_DUPLEX 1 #define OSI_HALF_DUPLEX 0 -#define OSI_IP4_FILTER 0U -#define OSI_IP6_FILTER 1U -#define OSI_IPV6_MATCH 1U -#define OSI_IPV4_MATCH 0U - /* L2 filter operations supported by OSI layer. 
These operation modes shall be * set by OSD driver as input to update registers accordingly. */ @@ -147,16 +193,12 @@ typedef my_lint_64 nvel64_t; #define OSI_OPER_DIS_PROMISC OSI_BIT(1) #define OSI_OPER_EN_ALLMULTI OSI_BIT(2) #define OSI_OPER_DIS_ALLMULTI OSI_BIT(3) -#define OSI_OPER_EN_L2_DA_INV OSI_BIT(4) -#define OSI_OPER_DIS_L2_DA_INV OSI_BIT(5) #define OSI_OPER_EN_PERFECT OSI_BIT(6) #define OSI_OPER_DIS_PERFECT OSI_BIT(7) #define OSI_OPER_ADDR_UPDATE OSI_BIT(8) #define OSI_OPER_ADDR_DEL OSI_BIT(9) -#define OSI_PAUSE_FRAMES_DISABLE 1U #define OSI_PFT_MATCH 0U -#define OSI_SOURCE_MATCH 0U #define OSI_SA_MATCH 1U #define OSI_SPEED_10 10 @@ -177,42 +219,20 @@ typedef my_lint_64 nvel64_t; * @brief Ethernet PHY Interface Modes */ #define OSI_XFI_MODE_10G 0U -#define OSI_XFI_MODE_5G 1U +#define OSI_XFI_MODE_5G 1U #define OSI_USXGMII_MODE_10G 2U #define OSI_USXGMII_MODE_5G 3U -/** - * @addtogroup PTP-offload PTP offload defines - * @{ - */ -#define OSI_PTP_SNAP_ORDINARY 0U -#define OSI_PTP_SNAP_TRANSPORT 1U -#define OSI_PTP_SNAP_P2P 3U -#define OSI_PTP_MAX_PORTID 0xFFFFU -#define OSI_PTP_MAX_DOMAIN 0xFFU - /** * @addtogroup IOCTL OPS MACROS * * @brief IOCTL OPS for runtime commands * @{ */ -#define OSI_CMD_MDC_CONFIG 1U -#define OSI_CMD_RESTORE_REGISTER 2U #define OSI_CMD_L3L4_FILTER 3U -#define OSI_CMD_POLL_FOR_MAC_RST 4U -#define OSI_CMD_START_MAC 5U -#define OSI_CMD_STOP_MAC 6U #define OSI_CMD_COMMON_ISR 7U #define OSI_CMD_PAD_CALIBRATION 8U #define OSI_CMD_READ_MMC 9U -#define OSI_CMD_GET_MAC_VER 10U -#define OSI_CMD_VALIDATE_CORE_REG 11U -#define OSI_CMD_RESET_MMC 12U -#define OSI_CMD_SAVE_REGISTER 13U -#define OSI_CMD_MAC_LB 14U -#define OSI_CMD_FLOW_CTRL 15U -#define OSI_CMD_SET_MODE 16U #define OSI_CMD_SET_SPEED 17U #define OSI_CMD_L2_FILTER 18U #define OSI_CMD_RXCSUM_OFFLOAD 19U @@ -221,19 +241,9 @@ typedef my_lint_64 nvel64_t; #define OSI_CMD_CONFIG_PTP 22U #define OSI_CMD_GET_AVB 23U #define OSI_CMD_SET_AVB 24U -#define OSI_CMD_CONFIG_RX_CRC_CHECK 25U 
-#define OSI_CMD_UPDATE_VLAN_ID 26U -#define OSI_CMD_CONFIG_TXSTATUS 27U #define OSI_CMD_GET_HW_FEAT 28U -#define OSI_CMD_CONFIG_FW_ERR 29U -#define OSI_CMD_ARP_OFFLOAD 30U -#define OSI_CMD_VLAN_FILTER 31U -#define OSI_CMD_CONFIG_EEE 32U #define OSI_CMD_SET_SYSTOHW_TIME 33U -#define OSI_CMD_CONFIG_PTP_OFFLOAD 34U -#define OSI_CMD_PTP_RXQ_ROUTE 35U #define OSI_CMD_CONFIG_FRP 36U -#define OSI_CMD_CONFIG_RSS 37U #define OSI_CMD_CONFIG_EST 38U #define OSI_CMD_CONFIG_FPE 39U #define OSI_CMD_READ_REG 40U @@ -254,8 +264,18 @@ typedef my_lint_64 nvel64_t; #ifdef HSI_SUPPORT #define OSI_CMD_HSI_CONFIGURE 51U #endif +#ifdef OSI_DEBUG +#define OSI_CMD_DEBUG_INTR_CONFIG 52U +#endif +#define OSI_CMD_SUSPEND 53U +#define OSI_CMD_RESUME 54U +#ifdef HSI_SUPPORT +#define OSI_CMD_HSI_INJECT_ERR 55U +#endif +#define OSI_CMD_READ_STATS 56U /** @} */ +#ifdef LOG_OSI /** * @brief OSI error macro definition, * @param[in] priv: OSD private data OR NULL @@ -281,30 +301,21 @@ typedef my_lint_64 nvel64_t; osi_core->osd_ops.ops_log(priv, __func__, __LINE__, \ OSI_LOG_INFO, type, err, loga); \ } +#else +#define OSI_CORE_ERR(priv, type, err, loga) +#define OSI_CORE_INFO(priv, type, err, loga) +#endif #define VLAN_NUM_VID 4096U -#define OSI_VLAN_ACTION_ADD OSI_BIT(31) -#define OSI_VLAN_ACTION_DEL 0x0U -#define OSI_RXQ_ROUTE_PTP 0U #define OSI_DELAY_1000US 1000U #define OSI_DELAY_1US 1U -/** - * @addtogroup RSS related information - * - * @brief RSS hash key and table size. 
- * @{ - */ -#define OSI_RSS_HASH_KEY_SIZE 40U -#define OSI_RSS_MAX_TABLE_SIZE 128U -/** @} */ /** - * @addtogroup PTP related information + * @addtogroup PTP PTP related information * * @brief PTP SSINC values * @{ */ -#define OSI_PTP_SSINC_16 16U #define OSI_PTP_SSINC_4 4U #define OSI_PTP_SSINC_6 6U /** @} */ @@ -315,13 +326,16 @@ typedef my_lint_64 nvel64_t; * @brief Flexible Receive Parser commands, table size and other defines * @{ */ +#ifndef OSI_STRIPPED_LIB +#define OSI_FRP_CMD_MAX 3U +#define OSI_FRP_MATCH_MAX 10U +#endif /* !OSI_STRIPPED_LIB */ #define OSI_FRP_MAX_ENTRY 256U #define OSI_FRP_OFFSET_MAX 64U /* FRP Command types */ #define OSI_FRP_CMD_ADD 0U #define OSI_FRP_CMD_UPDATE 1U #define OSI_FRP_CMD_DEL 2U -#define OSI_FRP_CMD_MAX 3U /* FRP Filter mode defines */ #define OSI_FRP_MODE_ROUTE 0U #define OSI_FRP_MODE_DROP 1U @@ -333,7 +347,6 @@ typedef my_lint_64 nvel64_t; #define OSI_FRP_MODE_IM_LINK 7U #define OSI_FRP_MODE_MAX 8U /* Match data defines */ -#define OSI_FRP_MATCH_DATA_MAX 12U #define OSI_FRP_MATCH_NORMAL 0U #define OSI_FRP_MATCH_L2_DA 1U #define OSI_FRP_MATCH_L2_SA 2U @@ -344,32 +357,30 @@ typedef my_lint_64 nvel64_t; #define OSI_FRP_MATCH_L4_S_TPORT 7U #define OSI_FRP_MATCH_L4_D_TPORT 8U #define OSI_FRP_MATCH_VLAN 9U -#define OSI_FRP_MATCH_MAX 10U /** @} */ +#define XPCS_WRITE_FAIL_CODE -9 + #ifdef HSI_SUPPORT /** - * @addtogroup hsi_err_code_idx + * @addtogroup osi_hsi_err_code_idx * - * @brief data index for hsi_err_code array + * @brief data index for osi_hsi_err_code array * @{ */ -#define REPORTER_IDX 2U - #define UE_IDX 0U #define CE_IDX 1U #define RX_CRC_ERR_IDX 2U #define TX_FRAME_ERR_IDX 3U #define RX_CSUM_ERR_IDX 4U #define AUTONEG_ERR_IDX 5U - +#define XPCS_WRITE_FAIL_IDX 6U #define MACSEC_RX_CRC_ERR_IDX 0U #define MACSEC_TX_CRC_ERR_IDX 1U #define MACSEC_RX_ICV_ERR_IDX 2U +#define MACSEC_REG_VIOL_ERR_IDX 3U /** @} */ -extern nveu32_t hsi_err_code[][3]; - /** * @addtogroup HSI_TIME_THRESHOLD * @@ -388,13 +399,14 @@ extern 
nveu32_t hsi_err_code[][3]; /** * @brief Maximum number of different mac error code + * HSI_SW_ERR_CODE + Two (Corrected and Uncorrected error code) */ -#define HSI_MAX_MAC_ERROR_CODE 6U +#define OSI_HSI_MAX_MAC_ERROR_CODE 7U /** * @brief Maximum number of different macsec error code */ -#define HSI_MAX_MACSEC_ERROR_CODE 3U +#define HSI_MAX_MACSEC_ERROR_CODE 4U /** * @addtogroup HSI_SW_ERR_CODE @@ -409,7 +421,26 @@ extern nveu32_t hsi_err_code[][3]; #define OSI_MACSEC_RX_CRC_ERR 0x1005U #define OSI_MACSEC_TX_CRC_ERR 0x1006U #define OSI_MACSEC_RX_ICV_ERR 0x1007U - +#define OSI_MACSEC_REG_VIOL_ERR 0x1008U +#define OSI_XPCS_WRITE_FAIL_ERR 0x1009U + +#define OSI_HSI_MGBE0_UE_CODE 0x2A00U +#define OSI_HSI_MGBE1_UE_CODE 0x2A01U +#define OSI_HSI_MGBE2_UE_CODE 0x2A02U +#define OSI_HSI_MGBE3_UE_CODE 0x2A03U +#define OSI_HSI_EQOS0_UE_CODE 0x28ADU + +#define OSI_HSI_MGBE0_CE_CODE 0x2E08U +#define OSI_HSI_MGBE1_CE_CODE 0x2E09U +#define OSI_HSI_MGBE2_CE_CODE 0x2E0AU +#define OSI_HSI_MGBE3_CE_CODE 0x2E0BU +#define OSI_HSI_EQOS0_CE_CODE 0x2DE6U + +#define OSI_HSI_MGBE0_REPORTER_ID 0x8019U +#define OSI_HSI_MGBE1_REPORTER_ID 0x801AU +#define OSI_HSI_MGBE2_REPORTER_ID 0x801BU +#define OSI_HSI_MGBE3_REPORTER_ID 0x801CU +#define OSI_HSI_EQOS0_REPORTER_ID 0x8009U /** @} */ #endif @@ -443,39 +474,20 @@ struct osi_filter { nveu32_t dma_chansel; }; +#ifndef OSI_STRIPPED_LIB /** * @brief OSI core structure for RXQ route */ struct osi_rxq_route { #define OSI_RXQ_ROUTE_PTP 0U /** Indicates RX routing type OSI_RXQ_ROUTE_* */ - unsigned int route_type; + nveu32_t route_type; /** RXQ routing enable(1) disable (0) */ - unsigned int enable; + nveu32_t enable; /** RX queue index */ - unsigned int idx; -}; - -/** - * @brief L3/L4 filter function dependent parameter - */ -struct osi_l3_l4_filter { - /** Indicates the index of the filter to be modified. 
- * Filter index must be between 0 - 7 */ - nveu32_t filter_no; - /** filter enable(1) or disable(0) */ - nveu32_t filter_enb_dis; - /** source(0) or destination(1) */ - nveu32_t src_dst_addr_match; - /** perfect(0) or inverse(1) */ - nveu32_t perfect_inverse_match; - /** ipv4 address */ - nveu8_t ip4_addr[4]; - /** ipv6 address */ - nveu16_t ip6_addr[8]; - /** Port number */ - nveu16_t port_no; + nveu32_t idx; }; +#endif /** * @brief struct osi_hw_features - MAC HW supported features. @@ -800,64 +812,6 @@ struct osi_vlan_filter { nveu32_t perfect_inverse_match; }; -/** - * @brief FRP Instruction configuration structure - */ -struct osi_core_frp_data { - /* Entry Match Data */ - unsigned int match_data; - /* Entry Match Enable mask */ - unsigned int match_en; - /* Entry Accept frame flag */ - unsigned char accept_frame; - /* Entry Reject Frame flag */ - unsigned char reject_frame; - /* Entry Inverse match flag */ - unsigned char inverse_match; - /* Entry Next Instruction Control match flag */ - unsigned char next_ins_ctrl; - /* Entry Frame offset in the packet data */ - unsigned char frame_offset; - /* Entry OK Index - Next Instruction */ - unsigned char ok_index; - /* Entry DMA Channel selection (1-bit for each channel) */ - unsigned int dma_chsel; -}; - -/** - * @brief FRP command structure for OSD to OSI - */ -struct osi_core_frp_cmd { - /* FRP Command type */ - unsigned int cmd; - /* OSD FRP ID */ - int frp_id; - /* OSD match data type */ - unsigned char match_type; - /* OSD match data */ - unsigned char match[OSI_FRP_MATCH_DATA_MAX]; - /* OSD match data length */ - unsigned char match_length; - /* OSD Offset */ - unsigned char offset; - /* OSD FRP filter mode flag */ - unsigned char filter_mode; - /* OSD FRP Link ID */ - int next_frp_id; - /* OSD DMA Channel Selection */ - unsigned int dma_sel; -}; - -/** - * @brief FRP Instruction table entry configuration structure - */ -struct osi_core_frp_entry { - /* FRP ID */ - int frp_id; - /* FRP Entry data structure 
*/ - struct osi_core_frp_data data; -}; - /** * @brief L2 filter function dependent parameter */ @@ -868,124 +822,64 @@ struct osi_l2_da_filter { nveu32_t perfect_inverse_match; }; -/** - * @brief OSI Core avb data structure per queue. - */ -struct osi_core_avb_algorithm { - /** TX Queue/TC index */ - nveu32_t qindex; - /** CBS Algorithm enable(1) or disable(0) */ - nveu32_t algo; - /** When this bit is set, the accumulated credit parameter in the - * credit-based shaper algorithm logic is not reset to zero when - * there is positive credit and no packet to transmit in Channel. - * - * Expected values are enable(1) or disable(0) */ - nveu32_t credit_control; - /** idleSlopeCredit value required for CBS */ - nveu32_t idle_slope; - /** sendSlopeCredit value required for CBS */ - nveu32_t send_slope; - /** hiCredit value required for CBS */ - nveu32_t hi_credit; - /** lowCredit value required for CBS */ - nveu32_t low_credit; - /** Transmit queue operating mode - * - * 00: disable - * - * 01: avb - * - * 10: enable */ - nveu32_t oper_mode; - /** TC index */ - unsigned int tcindex; -}; -#endif /* !OSI_STRIPPED_LIB */ - /** * @brief struct ptp_offload_param - Parameter to support PTP offload. */ struct osi_pto_config { /** enable(0) / disable(1) */ - unsigned int en_dis; + nveu32_t en_dis; /** Flag for Master mode. * OSI_ENABLE for master OSI_DISABLE for slave */ - unsigned int master; + nveu32_t master; /** Flag to Select PTP packets for Taking Snapshots */ - unsigned int snap_type; + nveu32_t snap_type; /** ptp domain */ - unsigned int domain_num; + nveu32_t domain_num; /** The PTP Offload function qualifies received PTP * packet with unicast Destination address * 0 - only multicast, 1 - unicast and multicast */ - unsigned int mc_uc; + nveu32_t mc_uc; /** Port identification */ - unsigned int portid; + nveu32_t portid; }; /** - * @brief OSI Core EST structure + * @brief osi_core_rss - Struture used to store RSS Hash key and table + * information. 
*/ -struct osi_est_config { - /** enable/disable */ - unsigned int en_dis; - /** 64 bit base time register - * if both vlaues are 0, take ptp time to avoid BTRE - * index 0 for nsec, index 1 for sec - */ - unsigned int btr[2]; - /** 64 bit base time offset index 0 for nsec, index 1 for sec */ - unsigned int btr_offset[2]; - /** 40 bit cycle time register, index 0 for nsec, index 1 for sec */ - unsigned int ctr[2]; - /** Configured Time Interval width + 7 bit extension register */ - unsigned int ter; - /** size of the gate control list */ - unsigned int llr; - /** data array 8 bit gate op + 24 execution time - * MGBE HW support GCL depth 256 */ - unsigned int gcl[OSI_GCL_SIZE_256]; +struct osi_core_rss { + /** Flag to represent to enable RSS or not */ + nveu32_t enable; + /** Array for storing RSS Hash key */ + nveu8_t key[OSI_RSS_HASH_KEY_SIZE]; + /** Array for storing RSS Hash table */ + nveu32_t table[OSI_RSS_MAX_TABLE_SIZE]; }; /** - * @brief OSI Core FPE structure + * @brief Max num of MAC core registers to backup. It should be max of or >= + * (EQOS_MAX_BAK_IDX=380, coreX,...etc) backup registers. */ -struct osi_fpe_config { - /** Queue Mask 1 preemption 0- express bit representation */ - unsigned int tx_queue_preemption_enable; - /** RQ for all preemptable packets which are not filtered - * based on user priority or SA-DA - */ - unsigned int rq; -}; +#define CORE_MAX_BAK_IDX 700U /** - * @brief OSI Core TSN error stats structure + * @brief core_backup - Struct used to store backup of core HW registers. 
*/ -struct osi_tsn_stats { - /** Constant Gate Control Error */ - unsigned long const_gate_ctr_err; - /** Head-Of-Line Blocking due to Scheduling */ - unsigned long head_of_line_blk_sch; - /** Per TC Schedule Error */ - unsigned long hlbs_q[OSI_MAX_TC_NUM]; - /** Head-Of-Line Blocking due to Frame Size */ - unsigned long head_of_line_blk_frm; - /** Per TC Frame Size Error */ - unsigned long hlbf_q[OSI_MAX_TC_NUM]; - /** BTR Error */ - unsigned long base_time_reg_err; - /** Switch to Software Owned List Complete */ - unsigned long sw_own_list_complete; +struct core_backup { + /** Array of reg MMIO addresses (base of MAC + offset of reg) */ + void *reg_addr[CORE_MAX_BAK_IDX]; + /** Array of value stored in each corresponding register */ + nveu32_t reg_val[CORE_MAX_BAK_IDX]; }; +#endif /* !OSI_STRIPPED_LIB */ + /** * @brief PTP configuration structure */ struct osi_ptp_config { /** PTP filter parameters bit fields. - * + * * Enable Timestamp, Fine Timestamp, 1 nanosecond accuracy * are enabled by default. * @@ -1012,7 +906,7 @@ struct osi_ptp_config { * Select PTP packets for Taking Snapshots (OSI_BIT(16) + OSI_BIT(17)) * * AV 802.1AS Mode Enable OSI_BIT(28) - * + * * if ptp_filter is set to Zero then Time stamping is disabled */ nveu32_t ptp_filter; /** seconds to be updated to MAC */ @@ -1029,19 +923,6 @@ struct osi_ptp_config { nveu32_t ptp_rx_queue; }; -/** - * @brief osi_core_rss - Struture used to store RSS Hash key and table - * information. - */ -struct osi_core_rss { - /** Flag to represent to enable RSS or not */ - unsigned int enable; - /** Array for storing RSS Hash key */ - unsigned char key[OSI_RSS_HASH_KEY_SIZE]; - /** Array for storing RSS Hash table */ - unsigned int table[OSI_RSS_MAX_TABLE_SIZE]; -}; - /** * @brief osi_core_ptp_tsc_data - Struture used to store TSC and PTP time * information. @@ -1057,22 +938,6 @@ struct osi_core_ptp_tsc_data { nveu32_t tsc_low_bits; }; -/** - * @brief Max num of MAC core registers to backup. 
It should be max of or >= - * (EQOS_MAX_BAK_IDX=380, coreX,...etc) backup registers. - */ -#define CORE_MAX_BAK_IDX 700U - -/** - * @brief core_backup - Struct used to store backup of core HW registers. - */ -struct core_backup { - /** Array of reg MMIO addresses (base of MAC + offset of reg) */ - void *reg_addr[CORE_MAX_BAK_IDX]; - /** Array of value stored in each corresponding register */ - nveu32_t reg_val[CORE_MAX_BAK_IDX]; -}; - /** * @brief OSI VM IRQ data */ @@ -1090,7 +955,7 @@ struct osi_vm_irq_data { */ struct osd_core_ops { /** padctrl rx pin disable/enable callback */ - int (*padctrl_mii_rx_pins)(void *priv, nveu32_t enable); + nve32_t (*padctrl_mii_rx_pins)(void *priv, nveu32_t enable); /** logging callback */ void (*ops_log)(void *priv, const nve8_t *func, nveu32_t line, nveu32_t level, nveu32_t type, const nve8_t *err, @@ -1106,7 +971,7 @@ struct osd_core_ops { nveu32_t len); #ifdef MACSEC_SUPPORT /** Program macsec key table through Trust Zone callback */ - nve32_t (*macsec_tz_kt_config)(void *priv, unsigned char cmd, + nve32_t (*macsec_tz_kt_config)(void *priv, nveu8_t cmd, void *const kt_config, void *const genl_info); #endif /* MACSEC_SUPPORT */ @@ -1116,6 +981,8 @@ struct osd_core_ops { nveu32_t type, const char *fmt, ...); #endif + /** Lane bringup restart callback */ + void (*restart_lane_bringup)(void *priv, nveu32_t en_disable); }; #ifdef MACSEC_SUPPORT @@ -1126,7 +993,7 @@ struct osi_macsec_sc_info { /** Secure channel identifier */ nveu8_t sci[OSI_SCI_LEN]; /** Secure association key */ - nveu8_t sak[OSI_KEY_LEN_128]; + nveu8_t sak[OSI_KEY_LEN_256]; #ifdef MACSEC_KEY_PROGRAM /** Secure association key */ nveu8_t hkey[OSI_KEY_LEN_128]; @@ -1204,6 +1071,40 @@ struct osi_macsec_irq_stats { }; #endif /* MACSEC_SUPPORT */ +/** + * @brief FRP Instruction configuration structure + */ +struct osi_core_frp_data { + /** Entry Match Data */ + nveu32_t match_data; + /** Entry Match Enable mask */ + nveu32_t match_en; + /** Entry Accept frame flag */ 
+ nveu8_t accept_frame; + /** Entry Reject Frame flag */ + nveu8_t reject_frame; + /** Entry Inverse match flag */ + nveu8_t inverse_match; + /** Entry Next Instruction Control match flag */ + nveu8_t next_ins_ctrl; + /** Entry Frame offset in the packet data */ + nveu8_t frame_offset; + /** Entry OK Index - Next Instruction */ + nveu8_t ok_index; + /** Entry DMA Channel selection (1-bit for each channel) */ + nveu32_t dma_chsel; +}; + +/** + * @brief FRP Instruction table entry configuration structure + */ +struct osi_core_frp_entry { + /** FRP ID */ + nve32_t frp_id; + /** FRP Entry data structure */ + struct osi_core_frp_data data; +}; + /** * @brief Core time stamp data strcuture */ @@ -1250,21 +1151,21 @@ struct osi_ioctl { struct osi_l3_l4_filter l3l4_filter; /* HW feature structure */ struct osi_hw_features hw_feat; -#ifndef OSI_STRIPPED_LIB - /* AVB structure */ + /** AVB structure */ struct osi_core_avb_algorithm avb; - /* VLAN filter structure */ +#ifndef OSI_STRIPPED_LIB + /** VLAN filter structure */ struct osi_vlan_filter vlan_filter; -#endif /* !OSI_STRIPPED_LIB */ - /* PTP offload config structure*/ + /** PTP offload config structure*/ struct osi_pto_config pto_config; - /* RXQ route structure */ + /** RXQ route structure */ struct osi_rxq_route rxq_route; - /* FRP structure */ +#endif /* !OSI_STRIPPED_LIB */ + /** FRP structure */ struct osi_core_frp_cmd frp_cmd; - /* EST structure */ + /** EST structure */ struct osi_est_config est; - /* FRP structure */ + /** FRP structure */ struct osi_fpe_config fpe; /** PTP configuration settings */ struct osi_ptp_config ptp_config; @@ -1281,33 +1182,23 @@ struct core_padctrl { /** Memory mapped base address of eqos padctrl registers */ void *padctrl_base; /** EQOS_RD0_0 register offset */ - unsigned int offset_rd0; + nveu32_t offset_rd0; /** EQOS_RD1_0 register offset */ - unsigned int offset_rd1; + nveu32_t offset_rd1; /** EQOS_RD2_0 register offset */ - unsigned int offset_rd2; + nveu32_t offset_rd2; /** 
EQOS_RD3_0 register offset */ - unsigned int offset_rd3; + nveu32_t offset_rd3; /** RX_CTL_0 register offset */ - unsigned int offset_rx_ctl; + nveu32_t offset_rx_ctl; /** is pad calibration in progress */ - unsigned int is_pad_cal_in_progress; + nveu32_t is_pad_cal_in_progress; /** This flag set/reset using priv ioctl and DT entry */ - unsigned int pad_calibration_enable; -}; - -/** - * @brief OSI CORE packet error stats - */ -struct osi_core_pkt_err_stats { - /** IP Header Error */ - nveu64_t mgbe_ip_header_err; - /** Jabber time out Error */ - nveu64_t mgbe_jabber_timeout_err; - /** Payload Checksum Error */ - nveu64_t mgbe_payload_cs_err; - /** Under Flow Error */ - nveu64_t mgbe_tx_underflow_err; + nveu32_t pad_calibration_enable; + /** Reg ETHER_QOS_AUTO_CAL_CONFIG_0[AUTO_CAL_PD_OFFSET] value */ + nveu32_t pad_auto_cal_pd_offset; + /** Reg ETHER_QOS_AUTO_CAL_CONFIG_0[AUTO_CAL_PU_OFFSET] value */ + nveu32_t pad_auto_cal_pu_offset; }; #ifdef HSI_SUPPORT @@ -1322,11 +1213,11 @@ struct osi_hsi_data { /** error count threshold to report error */ nveu32_t err_count_threshold; /** HSI reporter ID */ - nveu32_t reporter_id; + nveu16_t reporter_id; /** HSI error codes */ - nveu32_t err_code[HSI_MAX_MAC_ERROR_CODE]; + nveu32_t err_code[OSI_HSI_MAX_MAC_ERROR_CODE]; /** HSI MAC report count threshold based error */ - nveu32_t report_count_err[HSI_MAX_MAC_ERROR_CODE]; + nveu32_t report_count_err[OSI_HSI_MAX_MAC_ERROR_CODE]; /** Indicates if error reporting to FSI is pending */ nveu32_t report_err; /** HSI MACSEC error codes */ @@ -1353,6 +1244,10 @@ struct osi_hsi_data { nveu64_t tx_frame_err_count; /** tx frame error count threshold hit */ nveu64_t tx_frame_err_threshold; + /** Rx UDP error injection count */ + nveu64_t inject_udp_err_count; + /** Rx CRC error injection count */ + nveu64_t inject_crc_err_count; }; #endif @@ -1362,8 +1257,6 @@ struct osi_hsi_data { struct osi_core_priv_data { /** Memory mapped base address of MAC IP */ void *base; - /** Memory mapped base 
address of HV window */ - void *hv_base; /** Memory mapped base address of DMA window of MAC IP */ void *dma_base; /** Memory mapped base address of XPCS IP */ @@ -1389,7 +1282,7 @@ struct osi_core_priv_data { /** FPE HW configuration initited to enable/disable * 1- FPE HW configuration initiated to enable * 0- FPE HW configuration initiated to disable */ - unsigned int is_fpe_enabled; + nveu32_t is_fpe_enabled; #endif /* MACSEC_SUPPORT */ /** Pointer to OSD private data structure */ void *osd; @@ -1403,24 +1296,16 @@ struct osi_core_priv_data { nveu32_t rxq_ctrl[OSI_MGBE_MAX_NUM_CHANS]; /** Rx MTl Queue mapping based on User Priority field */ nveu32_t rxq_prio[OSI_MGBE_MAX_NUM_CHANS]; - /** TQ:TC mapping */ - unsigned int tc[OSI_MGBE_MAX_NUM_CHANS]; - /** Residual queue valid with FPE support */ - unsigned int residual_queue; /** MAC HW type EQOS based on DT compatible */ nveu32_t mac; /** MAC version */ nveu32_t mac_ver; /** HW supported feature list */ struct osi_hw_features *hw_feat; - /** MDC clock rate */ - nveu32_t mdc_cr; /** MTU size */ nveu32_t mtu; /** Ethernet MAC address */ nveu8_t mac_addr[OSI_ETH_ALEN]; - /** DT entry to enable(0) or disable(1) pause frame support */ - nveu32_t pause_frames; /** Current flow control settings */ nveu32_t flow_ctrl; /** PTP configuration settings */ @@ -1429,49 +1314,56 @@ struct osi_core_priv_data { nveu32_t default_addend; /** mmc counter structure */ struct osi_mmc_counters mmc; - /** xtra sw error counters */ - struct osi_xtra_stat_counters xstats; /** DMA channel selection enable (1) */ nveu32_t dcs_en; - /** Functional safety config to do periodic read-verify of - * certain safety critical registers */ - void *safety_config; - /** Backup config to save/restore registers during suspend/resume */ - struct core_backup backup_config; - /** VLAN tag stripping enable(1) or disable(0) */ - nveu32_t strip_vlan_tag; - /** L3L4 filter bit bask, set index corresponding bit for - * filter if filter enabled */ - nveu32_t 
l3l4_filter_bitmask; + /** TQ:TC mapping */ + nveu32_t tc[OSI_MGBE_MAX_NUM_CHANS]; +#ifndef OSI_STRIPPED_LIB + /** Memory mapped base address of HV window */ + void *hv_base; /** csr clock is to program LPI 1 us tick timer register. * Value stored in MHz */ nveu32_t csr_clk_speed; - /** Tegra Pre-si platform info */ - nveu32_t pre_si; - /** Flag which decides virtualization is enabled(1) or disabled(0) */ - nveu32_t use_virtualization; - unsigned long vf_bitmap; - /** Array to maintaion VLAN filters */ - unsigned short vid[VLAN_NUM_VID]; + nveu64_t vf_bitmap; + /** Array to maintain VLAN filters */ + nveu16_t vid[VLAN_NUM_VID]; /** Count of number of VLAN filters in vid array */ - unsigned short vlan_filter_cnt; + nveu16_t vlan_filter_cnt; + /** RSS core structure */ + struct osi_core_rss rss; + /** DT entry to enable(1) or disable(0) pause frame support */ + nveu32_t pause_frames; +#endif + /** Residual queue valid with FPE support */ + nveu32_t residual_queue; /** FRP Instruction Table */ struct osi_core_frp_entry frp_table[OSI_FRP_MAX_ENTRY]; /** Number of valid Entries in the FRP Instruction Table */ - unsigned int frp_cnt; - /** RSS core structure */ - struct osi_core_rss rss; + nveu32_t frp_cnt; + /* Switch to Software Owned List Complete. 
+ * 1 - Successful and User configured GCL in placed + */ + nveu32_t est_ready; + /* FPE enabled, verify and respose done with peer device + * 1- Successful and can be used between P2P device + */ + nveu32_t fpe_ready; + /** MAC stats counters */ + struct osi_stats stats; + /** eqos pad control structure */ + struct core_padctrl padctrl; + /** MDC clock rate */ + nveu32_t mdc_cr; + /** VLAN tag stripping enable(1) or disable(0) */ + nveu32_t strip_vlan_tag; + /** L3L4 filter bit bask, set index corresponding bit for + * filter if filter enabled */ + nveu32_t l3l4_filter_bitmask; + /** Flag which decides virtualization is enabled(1) or disabled(0) */ + nveu32_t use_virtualization; /** HW supported feature list */ struct osi_hw_features *hw_feature; - /** Switch to Software Owned List Complete. - * 1 - Successful and User configured GCL in placed */ - unsigned int est_ready; - /** FPE enabled, verify and respose done with peer device - * 1- Sucessful and can be used between P2P device */ - unsigned int fpe_ready; - /** TSN stats counters */ - struct osi_tsn_stats tsn_stats; /** MC packets Multiple DMA channel selection flags */ nveu32_t mc_dmasel; /** UPHY GBE mode (1 for 10G, 0 for 5G) */ @@ -1482,12 +1374,8 @@ struct osi_core_priv_data { nveu32_t num_vm_irqs; /** PHY interface mode (0/1 for XFI 10/5G, 2/3 for USXGMII 10/5) */ nveu32_t phy_iface_mode; - /** eqos pad control structure */ - struct core_padctrl padctrl; /** MGBE MAC instance ID's */ nveu32_t instance_id; - /** Packet error stats */ - struct osi_core_pkt_err_stats pkt_err_stats; /** Ethernet controller MAC to MAC Time sync role * 1 - Primary interface, 2 - secondary interface, 0 - inactive interface */ @@ -1500,51 +1388,14 @@ struct osi_core_priv_data { #endif }; -/** - * @brief osi_poll_for_mac_reset_complete - Poll Software reset bit in MAC HW - * - * @note - * Algorithm: - * - Invokes EQOS routine to check for SWR (software reset) - * bit in DMA Basic mode register to make sure IP reset was 
successful. - * - * @param[in] osi_core: OSI Core private data structure. - * - * @pre MAC needs to be out of reset and proper clock configured. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_004 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ - -nve32_t osi_poll_for_mac_reset_complete( - struct osi_core_priv_data *const osi_core); - /** * @brief osi_hw_core_init - EQOS MAC, MTL and common DMA initialization. - * + * * @note * Algorithm: * - Invokes EQOS MAC, MTL and common DMA register init code. * * @param[in, out] osi_core: OSI core private data structure. - * @param[in] tx_fifo_size: OSI core private data structure. - * @param[in] rx_fifo_size: OSI core private data structure. * * @pre * - MAC should be out of reset. See osi_poll_for_mac_reset_complete() @@ -1566,931 +1417,70 @@ nve32_t osi_poll_for_mac_reset_complete( * - Required Privileges: None * - API Group: * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_hw_core_init(struct osi_core_priv_data *const osi_core, - nveu32_t tx_fifo_size, nveu32_t rx_fifo_size); - -/** - * @brief osi_hw_core_deinit - EQOS MAC deinitialization. - * - * @note - * Algorithm: - * - Stops MAC transmission and reception. - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre MAC has to be out of reset. 
- * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_007 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: No - * - De-initialization: Yes - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_hw_core_deinit(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_start_mac - Start MAC Tx/Rx engine - * - * @note - * Algorithm: - * - Enable MAC Tx and Rx engine. - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre MAC init should be complete. See osi_hw_core_init() and - * osi_hw_dma_init() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_008 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_start_mac(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_stop_mac - Stop MAC Tx/Rx engine - * - * @note - * Algorithm: - * - Stop MAC Tx and Rx engine - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre MAC DMA deinit should be complete. See osi_hw_dma_deinit() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_009 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: No - * - De-initialization: Yes - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -nve32_t osi_stop_mac(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_common_isr - Common ISR. - * - * @note - * Algorithm: - * - Takes care of handling the common interrupts accordingly as per - * the MAC IP - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_010 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: Yes - * - Signal handler: Yes - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_common_isr(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_set_mode - Set FD/HD mode. - * - * @note - * Algorithm: - * - Takes care of setting HD or FD mode accordingly as per the MAC IP - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] mode: Operating mode. (OSI_FULL_DUPLEX/OSI_HALF_DUPLEX) - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_011 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_set_mode(struct osi_core_priv_data *const osi_core, - const nve32_t mode); - -/** - * @brief osi_set_speed - Set operating speed. - * - * @note - * Algorithm: - * - Takes care of setting the operating speed accordingly as per - * the MAC IP. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] speed: Operating speed. 
- * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_012 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_set_speed(struct osi_core_priv_data *const osi_core, - const nve32_t speed); - -/** - * @brief osi_pad_calibrate - PAD calibration - * - * @note - * Algorithm: - * - Takes care of doing the pad calibration - * accordingly as per the MAC IP. - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre - * - MAC should out of reset and clocks enabled. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_013 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 value on failure or pad calibration is disabled - */ -nve32_t osi_pad_calibrate(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_config_fw_err_pkts - Configure forwarding of error packets - * - * @note - * Algorithm: - * - Configure MAC to enable/disable forwarding of error packets. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] qinx: Q index. Max OSI_EQOS_MAX_NUM_QUEUES. - * @param[in] fw_err: Enable or disable forwarding of error packets. - * 0: Disable 1: Enable - * - * @pre MAC should be init and started. 
see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_020 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_config_fw_err_pkts(struct osi_core_priv_data *const osi_core, - const nveu32_t qinx, const nveu32_t fw_err); - -/** - * @brief osi_config_rxcsum_offload - Configure RX checksum offload in MAC. - * - * @note - * Algorithm: - * - Invokes EQOS config RX checksum offload routine. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] enable: Enable/disable flag. 0: Disable 1: Enable - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_017 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_config_rxcsum_offload(struct osi_core_priv_data *const osi_core, - const nveu32_t enable); - -/** - * @brief osi_l2_filter - configure L2 mac filter. - * - * @note - * Algorithm: - * - This sequence is used to configure MAC in different packet - * processing modes like promiscuous, multicast, unicast, - * hash unicast/multicast and perfect/inverse matching for L2 DA - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] filter: OSI filter structure. - * - * @pre - * - MAC should be initialized and started. 
see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_018 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_l2_filter(struct osi_core_priv_data *const osi_core, - const struct osi_filter *filter); - -/** - * @brief osi_write_phy_reg - Write to a PHY register through MAC over MDIO bus. - * - * @note - * Algorithm: - * - Before proceeding for reading for PHY register check whether any MII - * operation going on MDIO bus by polling MAC_GMII_BUSY bit. - * - Program data into MAC MDIO data register. - * - Populate required parameters like phy address, phy register etc,, - * in MAC MDIO Address register. write and GMII busy bits needs to be set - * in this operation. - * - Write into MAC MDIO address register poll for GMII busy for MDIO - * operation to complete. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] phyaddr: PHY address (PHY ID) associated with PHY - * @param[in] phyreg: Register which needs to be write to PHY. - * @param[in] phydata: Data to write to a PHY register. - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_002 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -nve32_t osi_write_phy_reg(struct osi_core_priv_data *const osi_core, - const nveu32_t phyaddr, const nveu32_t phyreg, - const nveu16_t phydata); - -/** - * @brief osi_read_mmc - invoke function to read actual registers and update - * structure variable mmc - * - * @note - * Algorithm: - * - Read the registers, mask reserve bits if required, update - * structure. - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre - * - MAC should be init and started. see osi_start_mac() - * - osi_core->osd should be populated - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_014 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_read_mmc(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_read_phy_reg - Read from a PHY register through MAC over MDIO bus. - * - * @note - * Algorithm: - * - Before proceeding for reading for PHY register check whether any MII - * operation going on MDIO bus by polling MAC_GMII_BUSY bit. - * - Populate required parameters like phy address, phy register etc,, - * in program it in MAC MDIO Address register. Read and GMII busy bits - * needs to be set in this operation. - * - Write into MAC MDIO address register poll for GMII busy for MDIO - * operation to complete. After this data will be available at MAC MDIO - * data register. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] phyaddr: PHY address (PHY ID) associated with PHY - * @param[in] phyreg: Register which needs to be read from PHY. - * - * @pre MAC should be init and started. 
see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_003 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval data from PHY register on success - * @retval -1 on failure - */ -nve32_t osi_read_phy_reg(struct osi_core_priv_data *const osi_core, - const nveu32_t phyaddr, - const nveu32_t phyreg); - -/** - * @brief initializing the core operations - * - * @param[in] osi_core: OSI core private data structure. - * - * @retval data from PHY register on success - * @retval -1 on failure - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_001 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - */ -nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_set_systime_to_mac - Handles setting of system time. - * - * @note - * Algorithm: - * - Set current system time to MAC. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] sec: Seconds to be configured. - * @param[in] nsec: Nano seconds to be configured. - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_005 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -nve32_t osi_set_systime_to_mac(struct osi_core_priv_data *const osi_core, - const nveu32_t sec, const nveu32_t nsec); - -/** - * @brief osi_adjust_freq - Adjust frequency - * - * @note - * Algorithm: - * - Adjust a drift of +/- comp nanoseconds per second. - * "Compensation" is the difference in frequency between - * the master and slave clocks in Parts Per Billion. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] ppb: Parts per Billion - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_023 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_adjust_freq(struct osi_core_priv_data *const osi_core, nve32_t ppb); - -/** - * @brief osi_adjust_time - Adjust MAC time with system time - * - * @note - * Algorithm: - * - Adjust/update the MAC time (delta time from MAC to system time - * passed in nanoseconds, can be + or -). - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] nsec_delta: Delta time in nano seconds - * - * @pre - * - MAC should be init and started. see osi_start_mac() - * - osi_core->ptp_config.one_nsec_accuracy need to be set to 1 - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_022 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -nve32_t osi_adjust_time(struct osi_core_priv_data *const osi_core, - nvel64_t nsec_delta); - -/** - * @brief osi_ptp_configuration - Configure PTP - * - * @note - * Algorithm: - * - Configure the PTP registers that are required for PTP. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] enable: Enable or disable Time Stamping. 0: Disable 1: Enable - * - * @pre - * - MAC should be init and started. see osi_start_mac() - * - osi->ptp_config.ptp_filter need to be filled accordingly to the - * filter that need to be set for PTP packets. Please check osi_ptp_config - * structure declaration on the bit fields that need to be filled. - * - osi->ptp_config.ptp_clock need to be filled with the ptp system clk. - * Currently it is set to 62500000Hz. - * - osi->ptp_config.ptp_ref_clk_rate need to be filled with the ptp - * reference clock that platform supports. - * - osi->ptp_config.sec need to be filled with current time of seconds - * - osi->ptp_config.nsec need to be filled with current time of nseconds - * - osi->base need to be filled with the ioremapped base address - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_021 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_ptp_configuration(struct osi_core_priv_data *const osi_core, - const nveu32_t enable); - -/* MAC version specific implementation function prototypes added here - * for misra compliance to have - * 1. Visible prototype for all functions. - * 2. Only one prototype for all function. - */ -void *eqos_get_core_safety_config(void); - -/** - * @brief osi_l3l4_filter - invoke OSI call to add L3/L4 - * filters. 
- * - * @note - * Algorithm: - * - This routine is to enable/disable L3/l4 filter. - * Check for DCS enable as well as validate channel - * number if dcs_enable is set. After validation, configure L3(IPv4/IPv6) - * filters register for given address. Based on input arguments update - * IPv4/IPv6 source/destination address for L3 layer filtering or source and - * destination Port Number for L4(TCP/UDP) layer - * filtering. - * - * @param[in, out] osi_core: OSI core private data structure. - * @param[in] l_filter: L3L4 filter data structure. - * @param[in] type: L3 filter (ipv4(0) or ipv6(1)) - * or L4 filter (tcp(0) or udp(1)) - * @param[in] dma_routing_enable: filter based dma routing enable(1) - * @param[in] dma_chan: dma channel for routing based on filter. - * Max OSI_EQOS_MAX_NUM_CHANS. - * @param[in] is_l4_filter: API call for L3 filter(0) or L4 filter(1) - * - * @pre - * - MAC should be init and started. see osi_start_mac() - * - Concurrent invocations to configure filters is not supported. - * OSD driver shall serialize calls. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_019 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_l3l4_filter(struct osi_core_priv_data *const osi_core, - const struct osi_l3_l4_filter l_filter, - const nveu32_t type, - const nveu32_t dma_routing_enable, - const nveu32_t dma_chan, - const nveu32_t is_l4_filter); - -/** - * @brief osi_get_mac_version - Reading MAC version - * - * @note - * Algorithm: - * - Reads MAC version and check whether its valid or not. - * - * @param[in] osi_core: OSI core private data structure. - * @param[out] mac_ver: holds mac version. - * - * @pre MAC has to be out of reset. 
- * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_015 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_get_mac_version(struct osi_core_priv_data *const osi_core, - nveu32_t *mac_ver); - -/** - * @brief osi_get_hw_features - Reading MAC HW features - * - * @param[in] osi_core: OSI core private data structure. - * @param[out] hw_feat: holds the supported features of the hardware. - * - * @pre MAC has to be out of reset. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_016 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_get_hw_features(struct osi_core_priv_data *const osi_core, - struct osi_hw_features *hw_feat); - -/** - * @brief osi_handle_ioctl - API to handle runtime command - * - * @note - * Algorithm: - * - Handle runtime commands to OSI - * - OSI_CMD_MDC_CONFIG - * Derive MDC clock based on provided AXI_CBB clk - * arg1_u32 - CSR (AXI CBB) clock rate. 
- * - OSI_CMD_RESTORE_REGISTER - * Restore backup of MAC MMIO address space - * - OSI_CMD_POLL_FOR_MAC_RST - * Poll Software reset bit in MAC HW - * - OSI_CMD_START_MAC - * Start MAC Tx/Rx engine - * - OSI_CMD_STOP_MAC - * Stop MAC Tx/Rx engine - * - OSI_CMD_COMMON_ISR - * Common ISR handler - * - OSI_CMD_PAD_CALIBRATION - * PAD calibration - * - OSI_CMD_READ_MMC - * invoke function to read actual registers and update - * structure variable mmc - * - OSI_CMD_GET_MAC_VER - * Reading MAC version - * arg1_u32 - holds mac version - * - OSI_CMD_VALIDATE_CORE_REG - * Read-validate HW registers for func safety - * - OSI_CMD_RESET_MMC - * invoke function to reset MMC counter and data - * structure - * - OSI_CMD_SAVE_REGISTER - * Take backup of MAC MMIO address space - * - OSI_CMD_MAC_LB - * Configure MAC loopback - * - OSI_CMD_FLOW_CTRL - * Configure flow control settings - * arg1_u32 - Enable or disable flow control settings - * - OSI_CMD_SET_MODE - * Set Full/Half Duplex mode. - * arg1_u32 - mode - * - OSI_CMD_SET_SPEED - * Set Operating speed - * arg1_u32 - Operating speed - * - OSI_CMD_L2_FILTER - * configure L2 mac filter - * l2_filter_struct - OSI filter structure - * - OSI_CMD_RXCSUM_OFFLOAD - * Configure RX checksum offload in MAC - * arg1_u32 - enable(1)/disable(0) - * - OSI_CMD_ADJ_FREQ - * Adjust frequency - * arg6_u32 - Parts per Billion - * - OSI_CMD_ADJ_TIME - * Adjust MAC time with system time - * arg1_u32 - Delta time in nano seconds - * - OSI_CMD_CONFIG_PTP - * Configure PTP - * arg1_u32 - Enable(1) or disable(0) Time Stamping - * - OSI_CMD_GET_AVB - * Get CBS algo and parameters - * avb_struct - osi core avb data structure - * - OSI_CMD_SET_AVB - * Set CBS algo and parameters - * avb_struct - osi core avb data structure - * - OSI_CMD_CONFIG_RX_CRC_CHECK - * Configure CRC Checking for Received Packets - * arg1_u32 - Enable or disable checking of CRC field in - * received pkts - * - OSI_CMD_UPDATE_VLAN_ID - * invoke osi call to update VLAN ID - * arg1_u32 - 
VLAN ID - * - OSI_CMD_CONFIG_TXSTATUS - * Configure Tx packet status reporting - * Enable(1) or disable(0) tx packet status reporting - * - OSI_CMD_GET_HW_FEAT - * Reading MAC HW features - * hw_feat_struct - holds the supported features of the hardware - * - OSI_CMD_CONFIG_FW_ERR - * Configure forwarding of error packets - * arg1_u32 - queue index, Max OSI_EQOS_MAX_NUM_QUEUES - * arg2_u32 - FWD error enable(1)/disable(0) - * - OSI_CMD_ARP_OFFLOAD - * Configure ARP offload in MAC - * arg1_u32 - Enable/disable flag - * arg7_u8_p - Char array representation of IP address - * - OSI_CMD_VLAN_FILTER - * OSI call for configuring VLAN filter - * vlan_filter - vlan filter structure - * - OSI_CMD_CONFIG_EEE - * Configure EEE LPI in MAC - * arg1_u32 - Enable (1)/disable (0) tx lpi - * arg2_u32 - Tx LPI entry timer in usecs upto - * OSI_MAX_TX_LPI_TIMER (in steps of 8usec) - * - OSI_CMD_L3L4_FILTER - * invoke OSI call to add L3/L4 - * l3l4_filter - l3_l4 filter structure - * arg1_u32 - L3 filter (ipv4(0) or ipv6(1)) - * or L4 filter (tcp(0) or udp(1) - * arg2_u32 - filter based dma routing enable(1) - * arg3_u32 - dma channel for routing based on filter. - * Max OSI_EQOS_MAX_NUM_CHANS. 
- * arg4_u32 - API call for L3 filter(0) or L4 filter(1) - * - OSI_CMD_SET_SYSTOHW_TIME - * set system to MAC hardware - * arg1_u32 - sec - * arg1_u32 - nsec - * - OSI_CMD_CONFIG_PTP_OFFLOAD - * enable/disable PTP offload feature - * pto_config - ptp offload structure - * - OSI_CMD_PTP_RXQ_ROUTE - * rxq routing to secific queue - * rxq_route - rxq routing information in structure - * - OSI_CMD_CONFIG_FRP - * Issue FRP command to HW - * frp_cmd - FRP command parameter - * - OSI_CMD_CONFIG_RSS - * Configure RSS - * - OSI_CMD_CONFIG_EST - * Configure EST registers and GCL to hw - * est - EST configuration structure - * - OSI_CMD_CONFIG_FPE - * Configuration FPE register and preemptable queue - * fpe - FPE configuration structure + * - Run time: No + * - De-initialization: No * - * - OSI_CMD_GET_TX_TS - * Command to get TX timestamp for PTP packet - * ts - OSI core timestamp structure + * @retval 0 on success + * @retval -1 on failure. + */ +nve32_t osi_hw_core_init(struct osi_core_priv_data *const osi_core); + +/** + * @brief osi_hw_core_deinit - EQOS MAC deinitialization. * - * - OSI_CMD_FREE_TS - * Command to free old timestamp for PTP packet - * chan - DMA channel number +1. 0 will be used for onestep + * @note + * Algorithm: + * - Stops MAC transmission and reception. * - * - OSI_CMD_CAP_TSC_PTP - * Capture TSC and PTP time stamp - * ptp_tsc_data - output structure with time + * @param[in] osi_core: OSI core private data structure. * - * - OSI_CMD_CONF_M2M_TS - * Enable/Disable MAC to MAC time sync for Secondary interface - * enable_disable - 1 - enable, 0- disable + * @pre MAC has to be out of reset. 
+ * + * @note + * Traceability Details: + * - SWUD_ID: ETHERNET_NVETHERNETRM_007 + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: No + * - Run time: No + * - De-initialization: Yes + * + * @retval 0 on success + * @retval -1 on failure. + */ +nve32_t osi_hw_core_deinit(struct osi_core_priv_data *const osi_core); + +/** + * @brief osi_write_phy_reg - Write to a PHY register through MAC over MDIO bus. + * + * @note + * Algorithm: + * - Before proceeding for reading for PHY register check whether any MII + * operation going on MDIO bus by polling MAC_GMII_BUSY bit. + * - Program data into MAC MDIO data register. + * - Populate required parameters like phy address, phy register etc,, + * in MAC MDIO Address register. write and GMII busy bits needs to be set + * in this operation. + * - Write into MAC MDIO address register poll for GMII busy for MDIO + * operation to complete. * * @param[in] osi_core: OSI core private data structure. - * @param[in] data: void pointer pointing to osi_ioctl + * @param[in] phyaddr: PHY address (PHY ID) associated with PHY + * @param[in] phyreg: Register which needs to be write to PHY. + * @param[in] phydata: Data to write to a PHY register. * * @pre MAC should be init and started. see osi_start_mac() * * @note * Traceability Details: + * - SWUD_ID: ETHERNET_NVETHERNETRM_002 * * @usage * - Allowed context for the API call @@ -2500,28 +1490,71 @@ nve32_t osi_get_hw_features(struct osi_core_priv_data *const osi_core, * - Async/Sync: Sync * - Required Privileges: None * - API Group: - * - Initialization: No + * - Initialization: Yes * - Run time: Yes * - De-initialization: No * * @retval 0 on success * @retval -1 on failure. 
*/ -nve32_t osi_handle_ioctl(struct osi_core_priv_data *osi_core, - struct osi_ioctl *data); +nve32_t osi_write_phy_reg(struct osi_core_priv_data *const osi_core, + const nveu32_t phyaddr, const nveu32_t phyreg, + const nveu16_t phydata); /** - * @brief osi_get_core - Get pointer to osi_core data structure. + * @brief osi_read_phy_reg - Read from a PHY register through MAC over MDIO bus. * * @note * Algorithm: - * - Returns OSI core data structure. + * - Before proceeding for reading for PHY register check whether any MII + * operation going on MDIO bus by polling MAC_GMII_BUSY bit. + * - Populate required parameters like phy address, phy register etc,, + * in program it in MAC MDIO Address register. Read and GMII busy bits + * needs to be set in this operation. + * - Write into MAC MDIO address register poll for GMII busy for MDIO + * operation to complete. After this data will be available at MAC MDIO + * data register. * - * @pre OSD layer should use this as first API to get osi_core pointer and - * use the same in remaning API invocation. + * @param[in] osi_core: OSI core private data structure. + * @param[in] phyaddr: PHY address (PHY ID) associated with PHY + * @param[in] phyreg: Register which needs to be read from PHY. + * + * @pre MAC should be init and started. see osi_start_mac() + * + * @note + * Traceability Details: + * - SWUD_ID: ETHERNET_NVETHERNETRM_003 + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: Yes + * - Run time: Yes + * - De-initialization: No + * + * @retval data from PHY register on success + * @retval -1 on failure + */ +nve32_t osi_read_phy_reg(struct osi_core_priv_data *const osi_core, + const nveu32_t phyaddr, + const nveu32_t phyreg); + +/** + * @brief initializing the core operations + * + * @param[in] osi_core: OSI core private data structure. 
+ * + * @retval data from PHY register on success + * @retval -1 on failure * * @note * Traceability Details: + * - SWUD_ID: ETHERNET_NVETHERNETRM_001 * * @usage * - Allowed context for the API call @@ -2535,13 +1568,11 @@ nve32_t osi_handle_ioctl(struct osi_core_priv_data *osi_core, * - Run time: No * - De-initialization: No * - * @retval valid and unique osi_core pointer on success - * @retval NULL on failure. */ -struct osi_core_priv_data *osi_get_core(void); +nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core); /** - * @brief osi_hal_handle_ioctl - HW function API to handle runtime command + * @brief osi_handle_ioctl - API to handle runtime command * * @note * Algorithm: @@ -2549,14 +1580,8 @@ struct osi_core_priv_data *osi_get_core(void); * - OSI_CMD_MDC_CONFIG * Derive MDC clock based on provided AXI_CBB clk * arg1_u32 - CSR (AXI CBB) clock rate. - * - OSI_CMD_RESTORE_REGISTER - * Restore backup of MAC MMIO address space * - OSI_CMD_POLL_FOR_MAC_RST * Poll Software reset bit in MAC HW - * - OSI_CMD_START_MAC - * Start MAC Tx/Rx engine - * - OSI_CMD_STOP_MAC - * Stop MAC Tx/Rx engine * - OSI_CMD_COMMON_ISR * Common ISR handler * - OSI_CMD_PAD_CALIBRATION @@ -2567,13 +1592,9 @@ struct osi_core_priv_data *osi_get_core(void); * - OSI_CMD_GET_MAC_VER * Reading MAC version * arg1_u32 - holds mac version - * - OSI_CMD_VALIDATE_CORE_REG - * Read-validate HW registers for func safety * - OSI_CMD_RESET_MMC * invoke function to reset MMC counter and data * structure - * - OSI_CMD_SAVE_REGISTER - * Take backup of MAC MMIO address space * - OSI_CMD_MAC_LB * Configure MAC loopback * - OSI_CMD_FLOW_CTRL @@ -2705,150 +1726,21 @@ struct osi_core_priv_data *osi_get_core(void); * @retval 0 on success * @retval -1 on failure. */ -nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, - struct osi_ioctl *data); -/** - * @brief osi_hal_hw_core_init - HW API for EQOS MAC, MTL and common DMA - * initialization. 
- * - * @note - * Algorithm: - * - Invokes EQOS MAC, MTL and common DMA register init code. - * - * @param[in, out] osi_core: OSI core private data structure. - * @param[in] tx_fifo_size: OSI core private data structure. - * @param[in] rx_fifo_size: OSI core private data structure. - * - * @pre - * - MAC should be out of reset. See osi_poll_for_mac_reset_complete() - * for details. - * - osi_core->base needs to be filled based on ioremap. - * - osi_core->num_mtl_queues needs to be filled. - * - osi_core->mtl_queues[qinx] need to be filled. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETRM_006 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_hal_hw_core_init(struct osi_core_priv_data *const osi_core, - nveu32_t tx_fifo_size, nveu32_t rx_fifo_size); - -/** - * @brief osi_hal_hw_core_deinit - HW API for MAC deinitialization. - * - * @note - * Algorithm: - * - Stops MAC transmission and reception. - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre MAC has to be out of reset. - * - * @note - * Traceability Details: - * - SWUD_ID: TODO - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: No - * - De-initialization: Yes - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_hal_hw_core_deinit(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_hal_write_phy_reg - HW API to Write to a PHY register through MAC - * over MDIO bus. 
- * - * @note - * Algorithm: - * - Before proceeding for reading for PHY register check whether any MII - * operation going on MDIO bus by polling MAC_GMII_BUSY bit. - * - Program data into MAC MDIO data register. - * - Populate required parameters like phy address, phy register etc,, - * in MAC MDIO Address register. write and GMII busy bits needs to be set - * in this operation. - * - Write into MAC MDIO address register poll for GMII busy for MDIO - * operation to complete. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] phyaddr: PHY address (PHY ID) associated with PHY - * @param[in] phyreg: Register which needs to be write to PHY. - * @param[in] phydata: Data to write to a PHY register. - * - * @pre MAC should be init and started. see osi_start_mac() - * - * @note - * Traceability Details: - * - SWUD_ID: TODO - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_hal_write_phy_reg(struct osi_core_priv_data *const osi_core, - const nveu32_t phyaddr, const nveu32_t phyreg, - const nveu16_t phydata); +nve32_t osi_handle_ioctl(struct osi_core_priv_data *osi_core, + struct osi_ioctl *data); /** - * @brief osi_hal_read_phy_reg - HW API to Read from a PHY register through MAC - * over MDIO bus. + * @brief osi_get_core - Get pointer to osi_core data structure. * * @note * Algorithm: - * - Before proceeding for reading for PHY register check whether any MII - * operation going on MDIO bus by polling MAC_GMII_BUSY bit. - * - Populate required parameters like phy address, phy register etc,, - * in program it in MAC MDIO Address register. Read and GMII busy bits - * needs to be set in this operation. 
- * - Write into MAC MDIO address register poll for GMII busy for MDIO - * operation to complete. After this data will be available at MAC MDIO - * data register. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] phyaddr: PHY address (PHY ID) associated with PHY - * @param[in] phyreg: Register which needs to be read from PHY. + * - Returns OSI core data structure. * - * @pre MAC should be init and started. see osi_start_mac() + * @pre OSD layer should use this as first API to get osi_core pointer and + * use the same in remaning API invocation. * * @note * Traceability Details: - * - SWUD_ID: TODO * * @usage * - Allowed context for the API call @@ -2859,13 +1751,11 @@ nve32_t osi_hal_write_phy_reg(struct osi_core_priv_data *const osi_core, * - Required Privileges: None * - API Group: * - Initialization: Yes - * - Run time: Yes + * - Run time: No * - De-initialization: No * - * @retval data from PHY register on success - * @retval -1 on failure + * @retval valid and unique osi_core pointer on success + * @retval NULL on failure. */ -nve32_t osi_hal_read_phy_reg(struct osi_core_priv_data *const osi_core, - const nveu32_t phyaddr, const nveu32_t phyreg); +struct osi_core_priv_data *osi_get_core(void); #endif /* INCLUDED_OSI_CORE_H */ - diff --git a/kernel/nvethernetrm/include/osi_dma.h b/kernel/nvethernetrm/include/osi_dma.h index 934784f71f..9151c3914c 100644 --- a/kernel/nvethernetrm/include/osi_dma.h +++ b/kernel/nvethernetrm/include/osi_dma.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -45,7 +45,6 @@ * @brief EQOS generic helper MACROS. 
* @{ */ -#define OSI_NET_IP_ALIGN 0x2U #define NV_VLAN_HLEN 0x4U #define OSI_ETH_HLEN 0xEU @@ -67,6 +66,7 @@ #define OSI_VM_IRQ_RX_CHAN_MASK(x) OSI_BIT(((x) * 2U) + 1U) /** @} */ +#ifdef LOG_OSI /** * OSI error macro definition, * @param[in] priv: OSD private data OR NULL @@ -94,6 +94,10 @@ OSI_LOG_INFO, type, err, loga);\ } #endif /* !OSI_STRIPPED_LIB */ +#else +#define OSI_DMA_ERR(priv, type, err, loga) +#endif /* LOG_OSI */ + /** * @addtogroup EQOS-PKT Packet context fields * @@ -119,7 +123,9 @@ /** Paged buffer */ #define OSI_PKT_CX_PAGED_BUF OSI_BIT(4) /** Rx packet has RSS hash */ +#ifndef OSI_STRIPPED_LIB #define OSI_PKT_CX_RSS OSI_BIT(5) +#endif /* !OSI_STRIPPED_LIB */ /** Valid packet */ #define OSI_PKT_CX_VALID OSI_BIT(10) /** Update Packet Length in Tx Desc3 */ @@ -128,18 +134,18 @@ #define OSI_PKT_CX_IP_CSUM OSI_BIT(12) /** @} */ +#ifndef OSI_STRIPPED_LIB /** * @addtogroup SLOT function context fields * * @brief These flags are used for DMA channel Slot context configuration * @{ */ -#ifndef OSI_STRIPPED_LIB #define OSI_SLOT_INTVL_DEFAULT 125U #define OSI_SLOT_INTVL_MAX 4095U -#endif /* !OSI_STRIPPED_LIB */ #define OSI_SLOT_NUM_MAX 16U /** @} */ +#endif /* !OSI_STRIPPED_LIB */ /** * @addtogroup EQOS-TX Tx done packet context fields @@ -147,7 +153,7 @@ * @brief These flags used to convey transmit done packet context information, * whether transmitted packet used a paged buffer, whether transmitted packet * has an tx error, whether transmitted packet has an TS - * + * * @{ */ /** Flag to indicate if buffer programmed in desc. is DMA map'd from @@ -209,7 +215,7 @@ /** @} */ - +#ifndef OSI_STRIPPED_LIB /** * @addtogroup RSS-HASH type * @@ -221,6 +227,7 @@ #define OSI_RX_PKT_HASH_TYPE_L3 0x2U #define OSI_RX_PKT_HASH_TYPE_L4 0x3U /** @} */ +#endif /* !OSI_STRIPPED_LIB */ /** * @addtogroup OSI-INTR OSI DMA interrupt handling macros. 
@@ -244,6 +251,7 @@ #ifdef OSI_DEBUG #define OSI_DMA_IOCTL_CMD_REG_DUMP 1U #define OSI_DMA_IOCTL_CMD_STRUCTS_DUMP 2U +#define OSI_DMA_IOCTL_CMD_DEBUG_INTR_CONFIG 3U #endif /* OSI_DEBUG */ /** @} */ @@ -252,6 +260,7 @@ */ #define OSI_TX_MAX_BUFF_SIZE 0x3FFFU +#ifndef OSI_STRIPPED_LIB /** * @brief OSI packet error stats */ @@ -287,14 +296,15 @@ struct osi_pkt_err_stats { /** FRP Parsed count, includes accept * routing-bypass, or result-bypass count. */ - unsigned long frp_parsed; + nveu64_t frp_parsed; /** FRP Dropped count */ - unsigned long frp_dropped; + nveu64_t frp_dropped; /** FRP Parsing Error count */ - unsigned long frp_err; + nveu64_t frp_err; /** FRP Incomplete Parsing */ - unsigned long frp_incomplete; + nveu64_t frp_incomplete; }; +#endif /* !OSI_STRIPPED_LIB */ /** * @brief Receive Descriptor @@ -322,6 +332,8 @@ struct osi_rx_swcx { nveu32_t len; /** Flags to share info about Rx swcx between OSD and OSI */ nveu32_t flags; + /** nvsocket data index */ + nveu64_t data_idx; }; /** @@ -333,16 +345,18 @@ struct osi_rx_pkt_cx { nveu32_t flags; /** Stores the Rx csum */ nveu32_t rxcsum; - /** Stores the VLAN tag ID in received packet */ - nveu32_t vlan_tag; /** Length of received packet */ nveu32_t pkt_len; + /** TS in nsec for the received packet */ + nveul64_t ns; +#ifndef OSI_STRIPPED_LIB + /** Stores the VLAN tag ID in received packet */ + nveu32_t vlan_tag; /** Stores received packet hash */ nveu32_t rx_hash; /** Store type of packet for which hash carries at rx_hash */ nveu32_t rx_hash_type; - /** TS in nsec for the received packet */ - nveul64_t ns; +#endif /* !OSI_STRIPPED_LIB */ }; /** @@ -374,20 +388,22 @@ struct osi_tx_swcx { void *buf_virt_addr; /** Length of buffer */ nveu32_t len; +#ifndef OSI_STRIPPED_LIB /** Flag to keep track of whether buffer pointed by buf_phy_addr * is a paged buffer/linear buffer */ nveu32_t is_paged_buf; +#endif /* !OSI_STRIPPED_LIB */ /** Flag to keep track of SWCX * Bit 0 is_paged_buf - whether buffer pointed by 
buf_phy_addr * is a paged buffer/linear buffer * Bit 1 PTP hwtime form timestamp registers */ - unsigned int flags; + nveu32_t flags; /** Packet id of packet for which TX timestamp needed */ - unsigned int pktid; + nveu32_t pktid; /** dma channel number for osd use */ nveu32_t chan; - /** reserved field 1 for future use */ - nveu64_t rsvd1; + /** nvsocket data index */ + nveu64_t data_idx; /** reserved field 2 for future use */ nveu64_t rsvd2; }; @@ -438,7 +454,7 @@ struct osi_txdone_pkt_cx { * bit is set in fields */ nveul64_t ns; /** Passing packet id to map TX time to packet */ - unsigned int pktid; + nveu32_t pktid; }; /** @@ -456,18 +472,23 @@ struct osi_tx_ring { nveu32_t cur_tx_idx; /** Descriptor index for descriptor cleanup */ nveu32_t clean_idx; +#ifndef OSI_STRIPPED_LIB /** Slot function check */ nveu32_t slot_check; /** Slot number */ nveu32_t slot_number; +#endif /* !OSI_STRIPPED_LIB */ /** Transmit packet context */ struct osi_tx_pkt_cx tx_pkt_cx; /** Transmit complete packet context information */ struct osi_txdone_pkt_cx txdone_pkt_cx; /** Number of packets or frames transmitted */ nveu32_t frame_cnt; + /** flag to skip memory barrier */ + nveu32_t skip_dmb; }; +#ifndef OSI_STRIPPED_LIB /** * @brief osi_xtra_dma_stat_counters - OSI DMA extra stats counters */ @@ -489,6 +510,7 @@ struct osi_xtra_dma_stat_counters { /** Total number of TSO packet count */ nveu64_t tx_tso_pkt_n; }; +#endif /* !OSI_STRIPPED_LIB */ struct osi_dma_priv_data; @@ -522,13 +544,17 @@ struct osd_dma_ops { #endif /* OSI_DEBUG */ }; +#ifdef OSI_DEBUG /** * @brief The OSI DMA IOCTL data structure. */ struct osi_dma_ioctl_data { /** IOCTL command number */ nveu32_t cmd; + /** IOCTL command argument */ + nveu32_t arg_u32; }; +#endif /* OSI_DEBUG */ /** * @brief The OSI DMA private data structure. 
@@ -552,10 +578,12 @@ struct osi_dma_priv_data { nveu32_t rx_buf_len; /** MTU size */ nveu32_t mtu; +#ifndef OSI_STRIPPED_LIB /** Packet error stats */ struct osi_pkt_err_stats pkt_err_stats; /** Extra DMA stats */ struct osi_xtra_dma_stat_counters dstats; +#endif /* !OSI_STRIPPED_LIB */ /** Receive Interrupt Watchdog Timer Count Units */ nveu32_t rx_riwt; /** Flag which decides riwt is enabled(1) or disabled(0) */ @@ -572,33 +600,30 @@ struct osi_dma_priv_data { nveu32_t tx_frames; /** Flag which decides tx_frames is enabled(1) or disabled(0) */ nveu32_t use_tx_frames; + /** DMA callback ops structure */ + struct osd_dma_ops osd_ops; +#ifndef OSI_STRIPPED_LIB /** Flag which decides virtualization is enabled(1) or disabled(0) */ nveu32_t use_virtualization; - /** Functional safety config to do periodic read-verify of - * certain safety critical dma registers */ - void *safety_config; /** Array of DMA channel slot snterval value from DT */ nveu32_t slot_interval[OSI_MGBE_MAX_NUM_CHANS]; /** Array of DMA channel slot enabled status from DT*/ nveu32_t slot_enabled[OSI_MGBE_MAX_NUM_CHANS]; - /** DMA callback ops structure */ - struct osd_dma_ops osd_ops; /** Virtual address of reserved DMA buffer */ void *resv_buf_virt_addr; /** Physical address of reserved DMA buffer */ nveu64_t resv_buf_phy_addr; - /** Tegra Pre-si platform info */ - nveu32_t pre_si; +#endif /* !OSI_STRIPPED_LIB */ /** PTP flags * OSI_PTP_SYNC_MASTER - acting as master * OSI_PTP_SYNC_SLAVE - acting as slave * OSI_PTP_SYNC_ONESTEP - one-step mode * OSI_PTP_SYNC_TWOSTEP - two step mode */ - unsigned int ptp_flag; + nveu32_t ptp_flag; +#ifdef OSI_DEBUG /** OSI DMA IOCTL data */ struct osi_dma_ioctl_data ioctl_data; -#ifdef OSI_DEBUG /** Flag to enable/disable descriptor dump */ nveu32_t enable_desc_dump; #endif /* OSI_DEBUG */ @@ -610,158 +635,6 @@ struct osi_dma_priv_data { nveu32_t rx_ring_sz; }; -/** - * @brief osi_disable_chan_tx_intr - Disables DMA Tx channel interrupts. 
- * - * @note - * Algorithm: - * - Disables Tx interrupts at wrapper level. - * - * @param[in] osi_dma: OSI DMA private data. - * @param[in] chan: DMA Tx channel number. Max OSI_EQOS_MAX_NUM_CHANS. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured. - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - Mapping of physical IRQ line to DMA channel need to be maintained at - * OS Dependent layer and pass corresponding channel number. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETCL_001 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: Yes - * - Signal handler: Yes - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: Yes - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_disable_chan_tx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan); - -/** - * @brief osi_enable_chan_tx_intr - Enable DMA Tx channel interrupts. - * - * @note - * Algorithm: - * - Enables Tx interrupts at wrapper level. - * - * @param[in] osi_dma: OSI DMA private data. - * @param[in] chan: DMA Tx channel number. Max OSI_EQOS_MAX_NUM_CHANS. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured. - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - Mapping of physical IRQ line to DMA channel need to be maintained at - * OS Dependent layer and pass corresponding channel number. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETCL_002 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: Yes - * - Signal handler: Yes - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -nve32_t osi_enable_chan_tx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan); - -/** - * @brief osi_disable_chan_rx_intr - Disable DMA Rx channel interrupts. - * - * @note - * Algorithm: - * - Disables Rx interrupts at wrapper level. - * - * @param[in] osi_dma: OSI DMA private data. - * @param[in] chan: DMA Rx channel number. Max OSI_EQOS_MAX_NUM_CHANS. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured. - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - Mapping of physical IRQ line to DMA channel need to be maintained at - * OS Dependent layer and pass corresponding channel number. - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETCL_003 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: Yes - * - Signal handler: Yes - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: Yes - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_disable_chan_rx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan); - -/** - * @brief osi_enable_chan_rx_intr - Enable DMA Rx channel interrupts. - * - * @note - * Algorithm: - * - Enables Rx interrupts at wrapper level. - * - * @param[in] osi_dma: OSI DMA private data. - * @param[in] chan: DMA Rx channel number. Max OSI_EQOS_MAX_NUM_CHANS. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured. - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - Mapping of physical IRQ line to DMA channel need to be maintained at - * OS Dependent layer and pass corresponding channel number. 
- * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETCL_004 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: Yes - * - Signal handler: Yes - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_enable_chan_rx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan); - /** * @brief osi_get_global_dma_status - Gets DMA status. * @@ -777,114 +650,6 @@ nve32_t osi_enable_chan_rx_intr(struct osi_dma_priv_data *osi_dma, */ nveu32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma); -/** - * @brief osi_clear_vm_tx_intr - Handles VM Tx interrupt source. - * - * Algorithm: Clear Tx interrupt source at wrapper level and DMA level. - * - * @param[in] osi_dma: DMA private data. - * @param[in] chan: DMA tx channel number. - * - * @note - * 1) MAC needs to be out of reset and proper clocks need to be configured. - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_clear_vm_tx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan); - -/** - * @brief osi_clear_vm_rx_intr - Handles VM Rx interrupt source. - * - * Algorithm: Clear Rx interrupt source at wrapper level and DMA level. - * - * @param[in] osi_dma: DMA private data. - * @param[in] chan: DMA rx channel number. - * - * @note - * 1) MAC needs to be out of reset and proper clocks need to be configured. - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - * 3) Mapping of physical IRQ line to DMA channel need to be maintained at - * OS Dependent layer and pass corresponding channel number. - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -nve32_t osi_clear_vm_rx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan); - -/** - * @brief Start DMA - * - * @note - * Algorithm: - * - Start the DMA for specific MAC - * - * @param[in] osi_dma: OSI DMA private data. - * @param[in] chan: DMA Tx/Rx channel number. Max OSI_EQOS_MAX_NUM_CHANS. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured. - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETCL_005 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_start_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan); - -/** - * @brief osi_stop_dma - Stop DMA - * - * @note - * Algorithm: - * - Stop the DMA for specific MAC - * - * @param[in] osi_dma: OSI DMA private data. - * @param[in] chan: DMA Tx/Rx channel number. Max OSI_EQOS_MAX_NUM_CHANS. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured. - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @note - * Traceability Details: - * - SWUD_ID: ETHERNET_NVETHERNETCL_006 - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: No - * - De-initialization: Yes - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -nve32_t osi_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan); - /** * @brief osi_get_refill_rx_desc_cnt - Rx descriptors count that needs to refill * @@ -913,8 +678,8 @@ nve32_t osi_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan); * * @retval "Number of available free descriptors." */ -nveu32_t osi_get_refill_rx_desc_cnt(struct osi_dma_priv_data *osi_dma, - unsigned int chan); +nveu32_t osi_get_refill_rx_desc_cnt(const struct osi_dma_priv_data *const osi_dma, + nveu32_t chan); /** * @brief osi_rx_dma_desc_init - DMA Rx descriptor init @@ -1349,6 +1114,7 @@ nveu32_t osi_is_mac_enabled(struct osi_dma_priv_data *const osi_dma); nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma, nveu32_t chan, nveu32_t tx_rx, nveu32_t en_dis); +#ifdef OSI_DEBUG /** * @brief osi_dma_ioctl - OSI DMA IOCTL * @@ -1365,44 +1131,8 @@ nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma, * @retval -1 on failure. */ nve32_t osi_dma_ioctl(struct osi_dma_priv_data *osi_dma); +#endif /* OSI_DEBUG */ #ifndef OSI_STRIPPED_LIB -/** - * @brief - Read-validate HW registers for func safety. - * - * @note - * Algorithm: - * - Reads pre-configured list of DMA configuration registers - * and compares with last written value for any modifications. - * - * @param[in] osi_dma: OSI DMA private data structure. - * - * @pre - * - MAC has to be out of reset. - * - osi_hw_dma_init has to be called. Internally this would initialize - * the safety_config (see osi_dma_priv_data) based on MAC version and - * which specific registers needs to be validated periodically. 
- * - Invoke this call if (osi_dma_priv_data->safety_config != OSI_NULL) - * - * @note - * Traceability Details: - * - * @usage - * - Allowed context for the API call - * - Interrupt handler: No - * - Signal handler: No - * - Thread safe: No - * - Async/Sync: Sync - * - Required Privileges: None - * - API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -nve32_t osi_validate_dma_regs(struct osi_dma_priv_data *osi_dma); - /** * @brief osi_clear_tx_pkt_err_stats - Clear tx packet error stats. * diff --git a/kernel/nvethernetrm/include/osi_dma_txrx.h b/kernel/nvethernetrm/include/osi_dma_txrx.h index 97b360725f..65d402d45d 100644 --- a/kernel/nvethernetrm/include/osi_dma_txrx.h +++ b/kernel/nvethernetrm/include/osi_dma_txrx.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -32,7 +32,6 @@ #define OSI_EQOS_TX_DESC_CNT 1024U #define OSI_EQOS_RX_DESC_CNT 1024U #define OSI_MGBE_TX_DESC_CNT 4096U -#define OSI_MGBE_RX_DESC_CNT 4096U #define OSI_MGBE_MAX_RX_DESC_CNT 16384U /** @} */ @@ -49,9 +48,11 @@ #define INCR_TX_DESC_INDEX(idx, x) ((idx) = ((idx) + (1U)) & ((x) - 1U)) /** Increment the rx descriptor index */ #define INCR_RX_DESC_INDEX(idx, x) ((idx) = ((idx) + (1U)) & ((x) - 1U)) -#ifndef OSI_STRIPPED_LIB +#ifdef OSI_DEBUG /** Decrement the tx descriptor index */ #define DECR_TX_DESC_INDEX(idx, x) ((idx) = ((idx) - (1U)) & ((x) - 1U)) +#endif /* OSI_DEBUG */ +#ifndef OSI_STRIPPED_LIB /** Decrement the rx descriptor index */ #define DECR_RX_DESC_INDEX(idx, x) ((idx) = ((idx) - (1U)) & ((x) - 1U)) #endif /* !OSI_STRIPPED_LIB */ diff --git a/kernel/nvethernetrm/include/osi_macsec.h 
b/kernel/nvethernetrm/include/osi_macsec.h index 8d98bd3f8b..d3598cdb98 100644 --- a/kernel/nvethernetrm/include/osi_macsec.h +++ b/kernel/nvethernetrm/include/osi_macsec.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -41,7 +41,9 @@ #define OSI_AN2_VALID OSI_BIT(2) #define OSI_AN3_VALID OSI_BIT(3) #define OSI_MAX_NUM_SA 4U +#ifdef DEBUG_MACSEC #define OSI_CURR_AN_MAX 3 +#endif /* DEBUG_MACSEC */ #define OSI_KEY_INDEX_MAX 31U #define OSI_PN_MAX_DEFAULT 0xFFFFFFFFU #define OSI_PN_THRESHOLD_DEFAULT 0xC0000000U @@ -97,7 +99,7 @@ /** @} */ /** - * @addtogroup Generic table CONFIG register helpers macros + * @addtogroup MACSEC-Generic table CONFIG register helpers macros * * @brief Helper macros for generic table CONFIG register programming * @{ @@ -114,14 +116,13 @@ #define OSI_SA_LUT_MAX_INDEX OSI_TABLE_INDEX_MAX /** @} */ +#ifdef DEBUG_MACSEC /** * @addtogroup Debug buffer table CONFIG register helpers macros * * @brief Helper macros for debug buffer table CONFIG register programming * @{ */ -#define OSI_DBG_TBL_READ OSI_LUT_READ -#define OSI_DBG_TBL_WRITE OSI_LUT_WRITE /* Num of Tx debug buffers */ #define OSI_TX_DBG_BUF_IDX_MAX 12U /* Num of Rx debug buffers */ @@ -140,6 +141,7 @@ #define OSI_RX_DBG_ICV_ERROR_EVT OSI_BIT(10) #define OSI_RX_DBG_CAPTURE_EVT OSI_BIT(11) /** @} */ +#endif /* DEBUG_MACSEC*/ /** * @addtogroup AES ciphers @@ -152,27 +154,22 @@ /** @} */ /** - * @addtogroup MACSEC Misc helper macro's + * @addtogroup MACSEC related helper MACROs * - * @brief MACSEC Helper macro's + * @brief MACSEC generic helper MACROs * @{ */ #define OSI_MACSEC_TX_EN OSI_BIT(0) #define OSI_MACSEC_RX_EN OSI_BIT(1) -/* MACSEC SECTAG + ICV + 2B ethertype adds upto 34B */ -#define MACSEC_TAG_ICV_LEN 34U -/* 
MACSEC TZ key config cmd */ -#define OSI_MACSEC_CMD_TZ_CONFIG 0x1 -/* MACSEC TZ key table entries reset cmd */ -#define OSI_MACSEC_CMD_TZ_KT_RESET 0x2 /** @} */ /** * @brief Indicates different operations on MACSEC SA */ +#ifdef MACSEC_KEY_PROGRAM #define OSI_CREATE_SA 1U +#endif /* MACSEC_KEY_PROGRAM */ #define OSI_ENABLE_SA 2U -#define OSI_DISABLE_SA 3U /** * @brief MACSEC SA State LUT entry outputs structure @@ -238,6 +235,7 @@ struct osi_macsec_table_config { nveu16_t index; }; +#if defined(MACSEC_KEY_PROGRAM) || defined(LINUX_OS) /** * @brief MACSEC Key Table entry structure */ @@ -247,6 +245,7 @@ struct osi_kt_entry { /** Indicates Hash-key */ nveu8_t h[OSI_KEY_LEN_128]; }; +#endif /* MACSEC_KEY_PROGRAM */ /** * @brief MACSEC BYP/SCI LUT entry inputs structure @@ -296,6 +295,7 @@ struct osi_macsec_lut_config { struct osi_sa_state_outputs sa_state_out; }; +#if defined(MACSEC_KEY_PROGRAM) || defined(LINUX_OS) /** * @brief MACSEC Key Table config data structure */ @@ -307,6 +307,7 @@ struct osi_macsec_kt_config { /** Indicates key table entry valid or not, bit 31 */ nveu32_t flags; }; +#endif /* MACSEC_KEY_PROGRAM */ /** * @brief MACSEC Debug buffer config data structure @@ -333,10 +334,8 @@ struct osi_macsec_core_ops { nveu32_t mtu); /** macsec de-init */ nve32_t (*deinit)(struct osi_core_priv_data *const osi_core); - /** Non Secure irq handler */ - void (*handle_ns_irq)(struct osi_core_priv_data *const osi_core); - /** Secure irq handler */ - void (*handle_s_irq)(struct osi_core_priv_data *const osi_core); + /** Macsec irq handler */ + void (*handle_irq)(struct osi_core_priv_data *const osi_core); /** macsec lut config */ nve32_t (*lut_config)(struct osi_core_priv_data *const osi_core, struct osi_macsec_lut_config *const lut_config); @@ -348,9 +347,11 @@ struct osi_macsec_core_ops { /** macsec cipher config */ nve32_t (*cipher_config)(struct osi_core_priv_data *const osi_core, nveu32_t cipher); +#ifdef DEBUG_MACSEC /** macsec loopback config */ nve32_t 
(*loopback_config)(struct osi_core_priv_data *const osi_core, nveu32_t enable); +#endif /* DEBUG_MACSEC */ /** macsec enable */ nve32_t (*macsec_en)(struct osi_core_priv_data *const osi_core, nveu32_t enable); @@ -361,19 +362,24 @@ struct osi_macsec_core_ops { nveu16_t *kt_idx); /** macsec read mmc counters */ void (*read_mmc)(struct osi_core_priv_data *const osi_core); +#ifdef DEBUG_MACSEC /** macsec debug buffer config */ nve32_t (*dbg_buf_config)(struct osi_core_priv_data *const osi_core, struct osi_macsec_dbg_buf_config *const dbg_buf_config); /** macsec debug buffer config */ nve32_t (*dbg_events_config)(struct osi_core_priv_data *const osi_core, struct osi_macsec_dbg_buf_config *const dbg_buf_config); +#endif /* DEBUG_MACSEC */ /** macsec get Key Index start for a given SCI */ nve32_t (*get_sc_lut_key_index)(struct osi_core_priv_data *const osi_core, nveu8_t *sci, nveu32_t *key_index, nveu16_t ctlr); /** macsec set MTU size */ nve32_t (*update_mtu)(struct osi_core_priv_data *const osi_core, nveu32_t mtu); - +#ifdef DEBUG_MACSEC + /** macsec interrupts configuration */ + void (*intr_config)(struct osi_core_priv_data *const osi_core, nveu32_t enable); +#endif /* DEBUG_MACSEC */ }; ////////////////////////////////////////////////////////////////////////// @@ -461,36 +467,12 @@ nve32_t osi_macsec_init(struct osi_core_priv_data *const osi_core, nve32_t osi_macsec_deinit(struct osi_core_priv_data *const osi_core); /** - * @brief osi_macsec_ns_isr - macsec non-secure irq handler - * - * @note - * Algorithm: - * - Return -1 if osi core or ops is null - * - handles non-secure macsec interrupts - * - Refer to MACSEC column of <<******, (sequence diagram)>> for API details. - * - TraceID: *********** - * - * @param[in] osi_core: OSI core private data structure - * - * @pre MACSEC needs to be out of reset and proper clock configured. 
- * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval none - */ -void osi_macsec_ns_isr(struct osi_core_priv_data *const osi_core); - -/** - * @brief osi_macsec_s_isr - macsec secure irq handler + * @brief osi_macsec_isr - macsec irq handler * * @note * Algorithm: * - Return -1 if osi core or ops is null - * - handles secure macsec interrupts + * - handles macsec interrupts * - Refer to MACSEC column of <<******, (sequence diagram)>> for API details. * - TraceID: *********** * @@ -506,7 +488,7 @@ void osi_macsec_ns_isr(struct osi_core_priv_data *const osi_core); * * @retval none */ -void osi_macsec_s_isr(struct osi_core_priv_data *const osi_core); +void osi_macsec_isr(struct osi_core_priv_data *const osi_core); /** * @brief osi_macsec_config_lut - Read or write to macsec LUTs @@ -535,6 +517,7 @@ void osi_macsec_s_isr(struct osi_core_priv_data *const osi_core); nve32_t osi_macsec_config_lut(struct osi_core_priv_data *const osi_core, struct osi_macsec_lut_config *const lut_config); +#ifdef MACSEC_KEY_PROGRAM /** * @brief osi_macsec_config_kt - API to read or update the keys * @@ -561,6 +544,7 @@ nve32_t osi_macsec_config_lut(struct osi_core_priv_data *const osi_core, */ nve32_t osi_macsec_config_kt(struct osi_core_priv_data *const osi_core, struct osi_macsec_kt_config *const kt_config); +#endif /* MACSEC_KEY_PROGRAM */ /** * @brief osi_macsec_cipher_config - API to update the cipher @@ -589,6 +573,7 @@ nve32_t osi_macsec_config_kt(struct osi_core_priv_data *const osi_core, nve32_t osi_macsec_cipher_config(struct osi_core_priv_data *const osi_core, nveu32_t cipher); +#ifdef DEBUG_MACSEC /** * @brief osi_macsec_loopback - API to enable/disable macsec loopback * @@ -613,8 +598,10 @@ nve32_t osi_macsec_cipher_config(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure */ + nve32_t osi_macsec_loopback(struct osi_core_priv_data *const osi_core, nveu32_t enable); +#endif /* 
DEBUG_MACSEC */ /** * @brief osi_macsec_en - API to enable/disable macsec @@ -657,6 +644,7 @@ nve32_t osi_macsec_en(struct osi_core_priv_data *const osi_core, * * @param[in] osi_core: OSI core private data structure * @param[in] sc: Pointer to the sc that needs to be added/deleted/updated + * @param[in] enable: macsec enable/disable selection * @param[in] ctlr: Controller selected * @param[out] kt_idx: Pointer to the kt_index passed to OSD * @@ -701,6 +689,7 @@ nve32_t osi_macsec_config(struct osi_core_priv_data *const osi_core, */ nve32_t osi_macsec_read_mmc(struct osi_core_priv_data *const osi_core); +#ifdef DEBUG_MACSEC /** * @brief osi_macsec_config_dbg_buf - Reads the debug buffer captured * @@ -756,7 +745,7 @@ nve32_t osi_macsec_config_dbg_buf( nve32_t osi_macsec_dbg_events_config( struct osi_core_priv_data *const osi_core, struct osi_macsec_dbg_buf_config *const dbg_buf_config); - +#endif /* DEBUG_MACSEC */ /** * @brief osi_macsec_get_sc_lut_key_index - API to get key index for a given SCI * diff --git a/kernel/nvethernetrm/osi/common/common.h b/kernel/nvethernetrm/osi/common/common.h index d2b9082d67..31de8d2c1f 100644 --- a/kernel/nvethernetrm/osi/common/common.h +++ b/kernel/nvethernetrm/osi/common/common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -22,11 +22,11 @@ #ifndef INCLUDED_COMMON_H #define INCLUDED_COMMON_H -#include "../osi/common/type.h" +#include #include /** - * @addtogroup Generic helper macros + * @addtogroup Generic helper MACROS * * @brief These are Generic helper macros used at various places. 
* @{ @@ -37,6 +37,12 @@ #define RETRY_DELAY 1U /** @} */ +/** MAC version type for EQOS version previous to 5.30 */ +#define MAC_CORE_VER_TYPE_EQOS 0U +/** MAC version type for EQOS version 5.30 */ +#define MAC_CORE_VER_TYPE_EQOS_5_30 1U +/** MAC version type for MGBE IP */ +#define MAC_CORE_VER_TYPE_MGBE 2U /** * @brief Maximum number of supported MAC IP types (EQOS and MGBE) @@ -48,8 +54,9 @@ * a condition is met or a timeout occurs * * @param[in] addr: Memory mapped address. + * @param[in] fn: function to be used. * @param[in] val: Variable to read the value. - * @param[in] cond: Break condition (usually involving @val). + * @param[in] cond: Break condition. * @param[in] delay_us: Maximum time to sleep between reads in us. * @param[in] retry: Retry count. @@ -60,9 +67,9 @@ */ #define osi_readl_poll_timeout(addr, fn, val, cond, delay_us, retry) \ ({ \ - unsigned int count = 0; \ + nveu32_t count = 0; \ while (count++ < retry) { \ - val = osi_readl((unsigned char *)addr); \ + val = osi_readl((nveu8_t *)addr); \ if ((cond)) { \ break; \ } \ @@ -234,7 +241,8 @@ static inline void osi_writela(OSI_UNUSED void *priv, nveu32_t val, void *addr) * @brief validate_mac_ver_update_chans - Validates mac version and update chan * * @param[in] mac_ver: MAC version read. - * @param[out] max_chans: Maximum channel number. + * @param[out] num_max_chans: Maximum channel number. + * @param[out] l_mac_ver: local mac version. * * @note MAC has to be out of reset. 
* @@ -248,26 +256,36 @@ static inline void osi_writela(OSI_UNUSED void *priv, nveu32_t val, void *addr) * @retval 1 - for Valid MAC */ static inline nve32_t validate_mac_ver_update_chans(nveu32_t mac_ver, - nveu32_t *max_chans) + nveu32_t *num_max_chans, + nveu32_t *l_mac_ver) { + nve32_t ret; + switch (mac_ver) { - case OSI_EQOS_MAC_4_10: case OSI_EQOS_MAC_5_00: - *max_chans = OSI_EQOS_XP_MAX_CHANS; + *num_max_chans = OSI_EQOS_XP_MAX_CHANS; + *l_mac_ver = MAC_CORE_VER_TYPE_EQOS; + ret = 1; break; case OSI_EQOS_MAC_5_30: - *max_chans = OSI_EQOS_MAX_NUM_CHANS; + *num_max_chans = OSI_EQOS_MAX_NUM_CHANS; + *l_mac_ver = MAC_CORE_VER_TYPE_EQOS_5_30; + ret = 1; break; - case OSI_MGBE_MAC_3_00: case OSI_MGBE_MAC_3_10: +#ifndef OSI_STRIPPED_LIB case OSI_MGBE_MAC_4_00: - *max_chans = OSI_MGBE_MAX_NUM_CHANS; +#endif /* !OSI_STRIPPED_LIB */ + *num_max_chans = OSI_MGBE_MAX_NUM_CHANS; + *l_mac_ver = MAC_CORE_VER_TYPE_MGBE; + ret = 1; break; default: - return 0; + ret = 0; + break; } - return 1; + return ret; } /** @@ -289,7 +307,7 @@ static inline void osi_memset(void *s, nveu32_t c, nveu64_t count) nveu64_t temp = count; if (s == OSI_NULL) { - return; + goto done; } xs = (nveu8_t *)s; while (temp != 0UL) { @@ -299,6 +317,8 @@ static inline void osi_memset(void *s, nveu32_t c, nveu64_t count) } temp--; } +done: + return; } /** @@ -314,38 +334,49 @@ static inline void osi_memset(void *s, nveu32_t c, nveu64_t count) * - Run time: Yes * - De-initialization: No */ -static inline nve32_t osi_memcpy(void *dest, void *src, nveu64_t n) +static inline nve32_t osi_memcpy(void *dest, const void *src, nveu64_t n) { - nve8_t *csrc = (nve8_t *)src; - nve8_t *cdest = (nve8_t *)dest; + nve8_t *cdest = dest; + const nve8_t *csrc = src; + nve32_t ret = 0; nveu64_t i = 0; - if (src == OSI_NULL || dest == OSI_NULL) { - return -1; + if ((src == OSI_NULL) || (dest == OSI_NULL)) { + ret = -1; + goto fail; } for (i = 0; i < n; i++) { cdest[i] = csrc[i]; } - return 0; +fail: + return ret; } -static 
inline nve32_t osi_memcmp(void *dest, void *src, nve32_t n) +static inline nve32_t osi_memcmp(const void *dest, const void *src, nve32_t n) { + const nve8_t *const cdest = dest; + const nve8_t *const csrc = src; + nve32_t ret = 0; nve32_t i; - nve8_t *csrc = (nve8_t *)src; - nve8_t *cdest = (nve8_t *)dest; - if (src == OSI_NULL || dest == OSI_NULL) - return -1; + if ((src == OSI_NULL) || (dest == OSI_NULL)) { + ret = -1; + goto fail; + } for (i = 0; i < n; i++) { if (csrc[i] < cdest[i]) { - return -1; + ret = -1; + goto fail; } else if (csrc[i] > cdest[i]) { - return 1; + ret = 1; + goto fail; + } else { + /* Do Nothing */ } } - return 0; +fail: + return ret; } #endif diff --git a/kernel/nvethernetrm/osi/common/mgbe_common.h b/kernel/nvethernetrm/osi/common/mgbe_common.h index 7ebffeb379..5ba83806a4 100644 --- a/kernel/nvethernetrm/osi/common/mgbe_common.h +++ b/kernel/nvethernetrm/osi/common/mgbe_common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,7 +24,7 @@ #define INCLUDED_MGBE_COMMON_H /** - * @addtogroup MGBE-MAC MGBE MAC common HW feature registers + * @addtogroup MGBE-MAC MAC register offsets * * @{ */ diff --git a/kernel/nvethernetrm/osi/common/osi_common.c b/kernel/nvethernetrm/osi/common/osi_common.c index 18df8ff4cf..cee520cf65 100644 --- a/kernel/nvethernetrm/osi/common/osi_common.c +++ b/kernel/nvethernetrm/osi/common/osi_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -31,7 +31,7 @@ void common_get_systime_from_mac(void *addr, nveu32_t mac, nveu32_t *sec, nveu64_t remain; nveul64_t ns; typedef nveul64_t (*get_time)(void *addr); - get_time i_ops[MAX_MAC_IP_TYPES] = { + const get_time i_ops[MAX_MAC_IP_TYPES] = { eqos_get_systime_from_mac, mgbe_get_systime_from_mac }; @@ -53,7 +53,7 @@ void common_get_systime_from_mac(void *addr, nveu32_t mac, nveu32_t *sec, nveu32_t common_is_mac_enabled(void *addr, nveu32_t mac) { typedef nveu32_t (*mac_enable_arr)(void *addr); - mac_enable_arr i_ops[MAX_MAC_IP_TYPES] = { + const mac_enable_arr i_ops[MAX_MAC_IP_TYPES] = { eqos_is_mac_enabled, mgbe_is_mac_enabled }; diff --git a/kernel/nvethernetrm/osi/core/Makefile.interface.tmk b/kernel/nvethernetrm/osi/core/Makefile.interface.tmk index 46379794c9..be368ac5ae 100644 --- a/kernel/nvethernetrm/osi/core/Makefile.interface.tmk +++ b/kernel/nvethernetrm/osi/core/Makefile.interface.tmk @@ -1,6 +1,6 @@ ################################### tell Emacs this is a -*- makefile-gmake -*- # -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), @@ -24,13 +24,12 @@ # ############################################################################### -ifdef NV_INTERFACE_FLAG_SHARED_LIBRARY_SECTION -NV_INTERFACE_NAME := nvethernetrm -NV_INTERFACE_EXPORTS := lib$(NV_INTERFACE_NAME) +ifdef NV_INTERFACE_FLAG_STATIC_LIBRARY_SECTION +NV_COMPONENT_NAME := nvethernetrm +NV_INTERFACE_COMPONENT_DIR := . 
NV_INTERFACE_PUBLIC_INCLUDES := \ ./include endif - # Local Variables: # indent-tabs-mode: t # tab-width: 8 diff --git a/kernel/nvethernetrm/osi/core/Makefile.tmk b/kernel/nvethernetrm/osi/core/Makefile.tmk index 521160af03..a4fd775346 100644 --- a/kernel/nvethernetrm/osi/core/Makefile.tmk +++ b/kernel/nvethernetrm/osi/core/Makefile.tmk @@ -1,6 +1,6 @@ ################################### tell Emacs this is a -*- makefile-gmake -*- # -# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), @@ -22,7 +22,7 @@ # ############################################################################### -ifdef NV_COMPONENT_FLAG_SHARED_LIBRARY_SECTION +ifdef NV_COMPONENT_FLAG_STATIC_LIBRARY_SECTION include $(NV_BUILD_START_COMPONENT) NV_COMPONENT_STRICT_WARNINGS_qnx_64 := 1 @@ -30,42 +30,37 @@ NV_COMPONENT_STRICT_WARNINGS_qnx_64 := 1 NV_COMPONENT_NAME := nvethernetrm NV_COMPONENT_OWN_INTERFACE_DIR := . 
NV_COMPONENT_SOURCES := \ - eqos_core.c \ - eqos_mmc.c \ - osi_core.c \ - vlan_filter.c \ - osi_hal.c \ - ivc_core.c \ - frp.c \ - mgbe_core.c \ - xpcs.c \ - mgbe_mmc.c \ - debug.c \ - core_common.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/eqos_core.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/eqos_mmc.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/osi_core.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/osi_hal.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/ivc_core.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/frp.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/mgbe_core.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/xpcs.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/mgbe_mmc.c \ + $(NV_SOURCE)/nvethernetrm/osi/core/core_common.c \ $(NV_SOURCE)/nvethernetrm/osi/common/osi_common.c \ $(NV_SOURCE)/nvethernetrm/osi/common/eqos_common.c \ $(NV_SOURCE)/nvethernetrm/osi/common/mgbe_common.c \ $(NV_SOURCE)/nvethernetrm/osi/core/macsec.c -#NV_COMPONENT_CFLAGS += -DMACSEC_SUPPORT -#NV_COMPONENT_CFLAGS += -DMACSEC_KEY_PROGRAM -#NV_COMPONENT_CFLAGS += -DDEBUG_MACSEC +NV_COMPONENT_INCLUDES := \ + $(NV_SOURCE)/nvethernetrm/include \ + $(NV_SOURCE)/nvethernetrm/osi/common/include -ifeq ($(NV_BUILD_CONFIGURATION_OS_IS_LINUX),1) - NV_COMPONENT_CFLAGS += -DLINUX_OS -else ifeq ($(NV_BUILD_CONFIGURATION_OS_IS_QNX),1) - NV_COMPONENT_CFLAGS += -DQNX_OS -endif +include $(NV_SOURCE)/nvethernetrm/include/config.tmk -ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY),0) -NV_COMPONENT_CFLAGS += -DOSI_DEBUG +ifeq ($(OSI_DEBUG),1) +NV_COMPONENT_SOURCES += $(NV_SOURCE)/nvethernetrm/osi/core/debug.c endif -NV_COMPONENT_INCLUDES := \ - $(NV_SOURCE)/nvethernetrm/include \ - $(NV_SOURCE)/nvethernetrm/osi/common/include +ifeq ($(OSI_STRIPPED_LIB),0) +NV_COMPONENT_SOURCES += \ + $(NV_SOURCE)/nvethernetrm/osi/core/vlan_filter.c +endif -include $(NV_BUILD_SHARED_LIBRARY) +include $(NV_BUILD_STATIC_LIBRARY) endif # Local Variables: diff --git a/kernel/nvethernetrm/osi/core/core_common.c b/kernel/nvethernetrm/osi/core/core_common.c index 
0d218a6924..02fbfaf250 100644 --- a/kernel/nvethernetrm/osi/core/core_common.c +++ b/kernel/nvethernetrm/osi/core/core_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,12 +24,596 @@ #include "core_common.h" #include "mgbe_core.h" #include "eqos_core.h" +#include "xpcs.h" +#include "macsec.h" + +static inline nve32_t poll_check(struct osi_core_priv_data *const osi_core, nveu8_t *addr, + nveu32_t bit_check, nveu32_t *value) +{ + nveu32_t retry = RETRY_COUNT; + nve32_t cond = COND_NOT_MET; + nveu32_t count; + nve32_t ret = 0; + + /* Poll Until Poll Condition */ + count = 0; + while (cond == COND_NOT_MET) { + if (count > retry) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "poll_check: timeout\n", 0ULL); + ret = -1; + goto fail; + } + + count++; + + *value = osi_readla(osi_core, addr); + if ((*value & bit_check) == OSI_NONE) { + cond = COND_MET; + } else { + osi_core->osd_ops.udelay(OSI_DELAY_1000US); + } + } +fail: + return ret; +} + + +nve32_t hw_poll_for_swr(struct osi_core_priv_data *const osi_core) +{ + nveu32_t dma_mode_val = 0U; + const nveu32_t dma_mode[2] = { EQOS_DMA_BMR, MGBE_DMA_MODE }; + void *addr = osi_core->base; + + return poll_check(osi_core, ((nveu8_t *)addr + dma_mode[osi_core->mac]), + DMA_MODE_SWR, &dma_mode_val); +} + +void hw_start_mac(struct osi_core_priv_data *const osi_core) +{ + void *addr = osi_core->base; + nveu32_t value; + const nveu32_t mac_mcr_te_reg[2] = { EQOS_MAC_MCR, MGBE_MAC_TMCR }; + const nveu32_t mac_mcr_re_reg[2] = { EQOS_MAC_MCR, MGBE_MAC_RMCR }; + const nveu32_t set_bit_te[2] = { EQOS_MCR_TE, MGBE_MAC_TMCR_TE }; + const nveu32_t set_bit_re[2] = { EQOS_MCR_RE, MGBE_MAC_RMCR_RE }; + + value = osi_readla(osi_core, ((nveu8_t *)addr + 
mac_mcr_te_reg[osi_core->mac])); + value |= set_bit_te[osi_core->mac]; + osi_writela(osi_core, value, ((nveu8_t *)addr + mac_mcr_te_reg[osi_core->mac])); + + value = osi_readla(osi_core, ((nveu8_t *)addr + mac_mcr_re_reg[osi_core->mac])); + value |= set_bit_re[osi_core->mac]; + osi_writela(osi_core, value, ((nveu8_t *)addr + mac_mcr_re_reg[osi_core->mac])); +} + +void hw_stop_mac(struct osi_core_priv_data *const osi_core) +{ + void *addr = osi_core->base; + nveu32_t value; + const nveu32_t mac_mcr_te_reg[2] = { EQOS_MAC_MCR, MGBE_MAC_TMCR }; + const nveu32_t mac_mcr_re_reg[2] = { EQOS_MAC_MCR, MGBE_MAC_RMCR }; + const nveu32_t clear_bit_te[2] = { EQOS_MCR_TE, MGBE_MAC_TMCR_TE }; + const nveu32_t clear_bit_re[2] = { EQOS_MCR_RE, MGBE_MAC_RMCR_RE }; + + value = osi_readla(osi_core, ((nveu8_t *)addr + mac_mcr_te_reg[osi_core->mac])); + value &= ~clear_bit_te[osi_core->mac]; + osi_writela(osi_core, value, ((nveu8_t *)addr + mac_mcr_te_reg[osi_core->mac])); + + value = osi_readla(osi_core, ((nveu8_t *)addr + mac_mcr_re_reg[osi_core->mac])); + value &= ~clear_bit_re[osi_core->mac]; + osi_writela(osi_core, value, ((nveu8_t *)addr + mac_mcr_re_reg[osi_core->mac])); +} + +nve32_t hw_set_mode(struct osi_core_priv_data *const osi_core, const nve32_t mode) +{ + void *base = osi_core->base; + nveu32_t mcr_val; + nve32_t ret = 0; + const nveu32_t bit_set[2] = { EQOS_MCR_DO, EQOS_MCR_DM }; + const nveu32_t clear_bit[2] = { EQOS_MCR_DM, EQOS_MCR_DO }; + + /* don't allow only if loopback mode is other than 0 or 1 */ + if ((mode != OSI_FULL_DUPLEX) && (mode != OSI_HALF_DUPLEX)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid duplex mode\n", 0ULL); + ret = -1; + goto fail; + } + + if (osi_core->mac == OSI_MAC_HW_EQOS) { + mcr_val = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_MCR); + mcr_val |= bit_set[mode]; + mcr_val &= ~clear_bit[mode]; + osi_writela(osi_core, mcr_val, ((nveu8_t *)base + EQOS_MAC_MCR)); + } +fail: + return ret; +} + +nve32_t hw_set_speed(struct 
osi_core_priv_data *const osi_core, const nve32_t speed) +{ + nveu32_t value; + nve32_t ret = 0; + void *base = osi_core->base; + const nveu32_t mac_mcr[2] = { EQOS_MAC_MCR, MGBE_MAC_TMCR }; + + if (((osi_core->mac == OSI_MAC_HW_EQOS) && (speed > OSI_SPEED_1000)) || + ((osi_core->mac == OSI_MAC_HW_MGBE) && ((speed < OSI_SPEED_2500) || + (speed > OSI_SPEED_10000)))) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "unsupported speed\n", (nveul64_t)speed); + ret = -1; + goto fail; + } + + value = osi_readla(osi_core, ((nveu8_t *)base + mac_mcr[osi_core->mac])); + switch (speed) { + case OSI_SPEED_10: + value |= EQOS_MCR_PS; + value &= ~EQOS_MCR_FES; + break; + case OSI_SPEED_100: + value |= EQOS_MCR_PS; + value |= EQOS_MCR_FES; + break; + case OSI_SPEED_1000: + value &= ~EQOS_MCR_PS; + value &= ~EQOS_MCR_FES; + break; + case OSI_SPEED_2500: + value |= MGBE_MAC_TMCR_SS_2_5G; + break; + case OSI_SPEED_5000: + value |= MGBE_MAC_TMCR_SS_5G; + break; + case OSI_SPEED_10000: + value &= ~MGBE_MAC_TMCR_SS_10G; + break; + default: + if (osi_core->mac == OSI_MAC_HW_EQOS) { + value &= ~EQOS_MCR_PS; + value &= ~EQOS_MCR_FES; + } else if (osi_core->mac == OSI_MAC_HW_MGBE) { + value &= ~MGBE_MAC_TMCR_SS_10G; + } else { + /* Do Nothing */ + } + break; + } + osi_writela(osi_core, value, ((nveu8_t *)osi_core->base + mac_mcr[osi_core->mac])); + + if (osi_core->mac == OSI_MAC_HW_MGBE) { + ret = xpcs_init(osi_core); + if (ret < 0) { + goto fail; + } + + ret = xpcs_start(osi_core); + if (ret < 0) { + goto fail; + } + } +fail: + return ret; +} + + +nve32_t hw_flush_mtl_tx_queue(struct osi_core_priv_data *const osi_core, + const nveu32_t q_inx) +{ + void *addr = osi_core->base; + nveu32_t tx_op_mode_val = 0U; + nveu32_t que_idx = (q_inx & 0xFU); + nveu32_t value; + const nveu32_t tx_op_mode[2] = { EQOS_MTL_CHX_TX_OP_MODE(que_idx), + MGBE_MTL_CHX_TX_OP_MODE(que_idx)}; + + /* Read Tx Q Operating Mode Register and flush TxQ */ + value = osi_readla(osi_core, ((nveu8_t *)addr + 
tx_op_mode[osi_core->mac])); + value |= MTL_QTOMR_FTQ; + osi_writela(osi_core, value, ((nveu8_t *)addr + tx_op_mode[osi_core->mac])); + + /* Poll Until FTQ bit resets for Successful Tx Q flush */ + return poll_check(osi_core, ((nveu8_t *)addr + tx_op_mode[osi_core->mac]), + MTL_QTOMR_FTQ, &tx_op_mode_val); +} + +nve32_t hw_config_fw_err_pkts(struct osi_core_priv_data *osi_core, + const nveu32_t q_inx, const nveu32_t enable_fw_err_pkts) +{ + nveu32_t val; + nve32_t ret = 0; + nveu32_t que_idx = (q_inx & 0xFU); + const nveu32_t rx_op_mode[2] = { EQOS_MTL_CHX_RX_OP_MODE(que_idx), + MGBE_MTL_CHX_RX_OP_MODE(que_idx)}; +#ifndef OSI_STRIPPED_LIB + const nveu32_t max_q[2] = { OSI_EQOS_MAX_NUM_QUEUES, + OSI_MGBE_MAX_NUM_QUEUES}; + /* Check for valid enable_fw_err_pkts and que_idx values */ + if (((enable_fw_err_pkts != OSI_ENABLE) && + (enable_fw_err_pkts != OSI_DISABLE)) || + (que_idx >= max_q[osi_core->mac])) { + ret = -1; + goto fail; + } + + /* Read MTL RXQ Operation_Mode Register */ + val = osi_readla(osi_core, ((nveu8_t *)osi_core->base + + rx_op_mode[osi_core->mac])); + + /* enable_fw_err_pkts, 1 is for enable and 0 is for disable */ + if (enable_fw_err_pkts == OSI_ENABLE) { + /* When enable_fw_err_pkts bit is set, all packets except + * the runt error packets are forwarded to the application + * or DMA. + */ + val |= MTL_RXQ_OP_MODE_FEP; + } else { + /* When this bit is reset, the Rx queue drops packets with error + * status (CRC error, GMII_ER, watchdog timeout, or overflow) + */ + val &= ~MTL_RXQ_OP_MODE_FEP; + } + + /* Write to FEP bit of MTL RXQ Operation Mode Register to enable or + * disable the forwarding of error packets to DMA or application. 
+ */ + osi_writela(osi_core, val, ((nveu8_t *)osi_core->base + + rx_op_mode[osi_core->mac])); +fail: + return ret; +#else + /* using void to skip the misra error of unused variable */ + (void)enable_fw_err_pkts; + /* Read MTL RXQ Operation_Mode Register */ + val = osi_readla(osi_core, ((nveu8_t *)osi_core->base + + rx_op_mode[osi_core->mac])); + val |= MTL_RXQ_OP_MODE_FEP; + /* Write to FEP bit of MTL RXQ Operation Mode Register to enable or + * disable the forwarding of error packets to DMA or application. + */ + osi_writela(osi_core, val, ((nveu8_t *)osi_core->base + + rx_op_mode[osi_core->mac])); + + return ret; +#endif /* !OSI_STRIPPED_LIB */ +} + +nve32_t hw_config_rxcsum_offload(struct osi_core_priv_data *const osi_core, + nveu32_t enabled) +{ + void *addr = osi_core->base; + nveu32_t value; + nve32_t ret = 0; + const nveu32_t rxcsum_mode[2] = { EQOS_MAC_MCR, MGBE_MAC_RMCR}; + const nveu32_t ipc_value[2] = { EQOS_MCR_IPC, MGBE_MAC_RMCR_IPC}; + + if ((enabled != OSI_ENABLE) && (enabled != OSI_DISABLE)) { + ret = -1; + goto fail; + } + + value = osi_readla(osi_core, ((nveu8_t *)addr + rxcsum_mode[osi_core->mac])); + if (enabled == OSI_ENABLE) { + value |= ipc_value[osi_core->mac]; + } else { + value &= ~ipc_value[osi_core->mac]; + } + + osi_writela(osi_core, value, ((nveu8_t *)addr + rxcsum_mode[osi_core->mac])); +fail: + return ret; +} + +nve32_t hw_set_systime_to_mac(struct osi_core_priv_data *const osi_core, + const nveu32_t sec, const nveu32_t nsec) +{ + void *addr = osi_core->base; + nveu32_t mac_tcr = 0U; + nve32_t ret = 0; + const nveu32_t mac_tscr[2] = { EQOS_MAC_TCR, MGBE_MAC_TCR}; + const nveu32_t mac_stsur[2] = { EQOS_MAC_STSUR, MGBE_MAC_STSUR}; + const nveu32_t mac_stnsur[2] = { EQOS_MAC_STNSUR, MGBE_MAC_STNSUR}; + + ret = poll_check(osi_core, ((nveu8_t *)addr + mac_tscr[osi_core->mac]), + MAC_TCR_TSINIT, &mac_tcr); + if (ret == -1) { + goto fail; + } + + /* write seconds value to MAC_System_Time_Seconds_Update register */ + osi_writela(osi_core, 
sec, ((nveu8_t *)addr + mac_stsur[osi_core->mac])); + + /* write nano seconds value to MAC_System_Time_Nanoseconds_Update + * register + */ + osi_writela(osi_core, nsec, ((nveu8_t *)addr + mac_stnsur[osi_core->mac])); + + /* issue command to update the configured secs and nsecs values */ + mac_tcr |= MAC_TCR_TSINIT; + osi_writela(osi_core, mac_tcr, ((nveu8_t *)addr + mac_tscr[osi_core->mac])); + + ret = poll_check(osi_core, ((nveu8_t *)addr + mac_tscr[osi_core->mac]), + MAC_TCR_TSINIT, &mac_tcr); +fail: + return ret; +} + +nve32_t hw_config_addend(struct osi_core_priv_data *const osi_core, + const nveu32_t addend) +{ + void *addr = osi_core->base; + nveu32_t mac_tcr = 0U; + nve32_t ret = 0; + const nveu32_t mac_tscr[2] = { EQOS_MAC_TCR, MGBE_MAC_TCR}; + const nveu32_t mac_tar[2] = { EQOS_MAC_TAR, MGBE_MAC_TAR}; + + ret = poll_check(osi_core, ((nveu8_t *)addr + mac_tscr[osi_core->mac]), + MAC_TCR_TSADDREG, &mac_tcr); + if (ret == -1) { + goto fail; + } + + /* write addend value to MAC_Timestamp_Addend register */ + osi_writela(osi_core, addend, ((nveu8_t *)addr + mac_tar[osi_core->mac])); + + /* issue command to update the configured addend value */ + mac_tcr |= MAC_TCR_TSADDREG; + osi_writela(osi_core, mac_tcr, ((nveu8_t *)addr + mac_tscr[osi_core->mac])); + + ret = poll_check(osi_core, ((nveu8_t *)addr + mac_tscr[osi_core->mac]), + MAC_TCR_TSADDREG, &mac_tcr); +fail: + return ret; +} + +#ifndef OSI_STRIPPED_LIB +void hw_config_tscr(struct osi_core_priv_data *const osi_core, const nveu32_t ptp_filter) +#else +void hw_config_tscr(struct osi_core_priv_data *const osi_core, OSI_UNUSED const nveu32_t ptp_filter) +#endif /* !OSI_STRIPPED_LIB */ +{ + void *addr = osi_core->base; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + nveu32_t mac_tcr = 0U; +#ifndef OSI_STRIPPED_LIB + nveu32_t i = 0U, temp = 0U; +#endif /* !OSI_STRIPPED_LIB */ + nveu32_t value = 0x0U; + const nveu32_t mac_tscr[2] = { EQOS_MAC_TCR, MGBE_MAC_TCR}; + const nveu32_t mac_pps[2] 
= { EQOS_MAC_PPS_CTL, MGBE_MAC_PPS_CTL}; + +#ifndef OSI_STRIPPED_LIB + if (ptp_filter != OSI_DISABLE) { + mac_tcr = (OSI_MAC_TCR_TSENA | OSI_MAC_TCR_TSCFUPDT | OSI_MAC_TCR_TSCTRLSSR); + for (i = 0U; i < 32U; i++) { + temp = ptp_filter & OSI_BIT(i); + + switch (temp) { + case OSI_MAC_TCR_SNAPTYPSEL_1: + mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_1; + break; + case OSI_MAC_TCR_SNAPTYPSEL_2: + mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_2; + break; + case OSI_MAC_TCR_SNAPTYPSEL_3: + mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_3; + break; + case OSI_MAC_TCR_TSIPV4ENA: + mac_tcr |= OSI_MAC_TCR_TSIPV4ENA; + break; + case OSI_MAC_TCR_TSIPV6ENA: + mac_tcr |= OSI_MAC_TCR_TSIPV6ENA; + break; + case OSI_MAC_TCR_TSEVENTENA: + mac_tcr |= OSI_MAC_TCR_TSEVENTENA; + break; + case OSI_MAC_TCR_TSMASTERENA: + mac_tcr |= OSI_MAC_TCR_TSMASTERENA; + break; + case OSI_MAC_TCR_TSVER2ENA: + mac_tcr |= OSI_MAC_TCR_TSVER2ENA; + break; + case OSI_MAC_TCR_TSIPENA: + mac_tcr |= OSI_MAC_TCR_TSIPENA; + break; + case OSI_MAC_TCR_AV8021ASMEN: + mac_tcr |= OSI_MAC_TCR_AV8021ASMEN; + break; + case OSI_MAC_TCR_TSENALL: + mac_tcr |= OSI_MAC_TCR_TSENALL; + break; + case OSI_MAC_TCR_CSC: + mac_tcr |= OSI_MAC_TCR_CSC; + break; + default: + break; + } + } + } else { + /* Disabling the MAC time stamping */ + mac_tcr = OSI_DISABLE; + } +#else + mac_tcr = (OSI_MAC_TCR_TSENA | OSI_MAC_TCR_TSCFUPDT | OSI_MAC_TCR_TSCTRLSSR + | OSI_MAC_TCR_TSVER2ENA | OSI_MAC_TCR_TSIPENA | OSI_MAC_TCR_TSIPV6ENA | + OSI_MAC_TCR_TSIPV4ENA | OSI_MAC_TCR_SNAPTYPSEL_1); +#endif /* !OSI_STRIPPED_LIB */ + + osi_writela(osi_core, mac_tcr, ((nveu8_t *)addr + mac_tscr[osi_core->mac])); + + value = osi_readla(osi_core, (nveu8_t *)addr + mac_pps[osi_core->mac]); + value &= ~MAC_PPS_CTL_PPSCTRL0; + if (l_core->pps_freq == OSI_ENABLE) { + value |= OSI_ENABLE; + } + osi_writela(osi_core, value, ((nveu8_t *)addr + mac_pps[osi_core->mac])); +} + +void hw_config_ssir(struct osi_core_priv_data *const osi_core) +{ + nveu32_t val = 0U; + void *addr = osi_core->base; + const 
struct core_local *l_core = (struct core_local *)(void *)osi_core; + const nveu32_t mac_ssir[2] = { EQOS_MAC_SSIR, MGBE_MAC_SSIR}; + const nveu32_t ptp_ssinc[3] = {OSI_PTP_SSINC_4, OSI_PTP_SSINC_6, OSI_PTP_SSINC_4}; + + /* by default Fine method is enabled */ + /* Fix the SSINC value based on Exact MAC used */ + val = ptp_ssinc[l_core->l_mac_ver]; + + val |= val << MAC_SSIR_SSINC_SHIFT; + /* update Sub-second Increment Value */ + osi_writela(osi_core, val, ((nveu8_t *)addr + mac_ssir[osi_core->mac])); +} + +nve32_t hw_ptp_tsc_capture(struct osi_core_priv_data *const osi_core, + struct osi_core_ptp_tsc_data *data) +{ +#ifndef OSI_STRIPPED_LIB + const struct core_local *l_core = (struct core_local *)osi_core; +#endif /* !OSI_STRIPPED_LIB */ + void *addr = osi_core->base; + nveu32_t tsc_ptp = 0U; + nve32_t ret = 0; + +#ifndef OSI_STRIPPED_LIB + /* This code is NA for Orin use case */ + if (l_core->l_mac_ver < MAC_CORE_VER_TYPE_EQOS_5_30) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "ptp_tsc: older IP\n", 0ULL); + ret = -1; + goto done; + } +#endif /* !OSI_STRIPPED_LIB */ + + osi_writela(osi_core, OSI_ENABLE, (nveu8_t *)osi_core->base + WRAP_SYNC_TSC_PTP_CAPTURE); + + ret = poll_check(osi_core, ((nveu8_t *)addr + WRAP_SYNC_TSC_PTP_CAPTURE), + OSI_ENABLE, &tsc_ptp); + if (ret == -1) { + goto done; + } + + data->tsc_low_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + + WRAP_TSC_CAPTURE_LOW); + data->tsc_high_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + + WRAP_TSC_CAPTURE_HIGH); + data->ptp_low_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + + WRAP_PTP_CAPTURE_LOW); + data->ptp_high_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + + WRAP_PTP_CAPTURE_HIGH); +done: + return ret; +} + +#ifndef OSI_STRIPPED_LIB +static inline void config_l2_da_perfect_inverse_match( + struct osi_core_priv_data *osi_core, + nveu32_t perfect_inverse_match) +{ + nveu32_t value = 0U; + + value = osi_readla(osi_core, ((nveu8_t *)osi_core->base + 
MAC_PKT_FILTER_REG)); + value &= ~MAC_PFR_DAIF; + if (perfect_inverse_match == OSI_INV_MATCH) { + /* Set DA Inverse Filtering */ + value |= MAC_PFR_DAIF; + } + osi_writela(osi_core, value, ((nveu8_t *)osi_core->base + MAC_PKT_FILTER_REG)); +} +#endif /* !OSI_STRIPPED_LIB */ + +nve32_t hw_config_mac_pkt_filter_reg(struct osi_core_priv_data *const osi_core, + const struct osi_filter *filter) +{ + nveu32_t value = 0U; + nve32_t ret = 0; + + value = osi_readla(osi_core, ((nveu8_t *)osi_core->base + MAC_PKT_FILTER_REG)); + + /*Retain all other values */ + value &= (MAC_PFR_DAIF | MAC_PFR_DBF | MAC_PFR_SAIF | + MAC_PFR_SAF | MAC_PFR_PCF | MAC_PFR_VTFE | + MAC_PFR_IPFE | MAC_PFR_DNTU | MAC_PFR_RA); + + if ((filter->oper_mode & OSI_OPER_EN_PERFECT) != OSI_DISABLE) { + value |= MAC_PFR_HPF; + } + +#ifndef OSI_STRIPPED_LIB + if ((filter->oper_mode & OSI_OPER_DIS_PERFECT) != OSI_DISABLE) { + value &= ~MAC_PFR_HPF; + } + + if ((filter->oper_mode & OSI_OPER_EN_PROMISC) != OSI_DISABLE) { + value |= MAC_PFR_PR; + } + + if ((filter->oper_mode & OSI_OPER_DIS_PROMISC) != OSI_DISABLE) { + value &= ~MAC_PFR_PR; + } + + if ((filter->oper_mode & OSI_OPER_EN_ALLMULTI) != OSI_DISABLE) { + value |= MAC_PFR_PM; + } + + if ((filter->oper_mode & OSI_OPER_DIS_ALLMULTI) != OSI_DISABLE) { + value &= ~MAC_PFR_PM; + } +#endif /* !OSI_STRIPPED_LIB */ + + osi_writela(osi_core, value, + ((nveu8_t *)osi_core->base + MAC_PKT_FILTER_REG)); + +#ifndef OSI_STRIPPED_LIB + if ((filter->oper_mode & OSI_OPER_EN_L2_DA_INV) != OSI_DISABLE) { + config_l2_da_perfect_inverse_match(osi_core, OSI_INV_MATCH); + } + + if ((filter->oper_mode & OSI_OPER_DIS_L2_DA_INV) != OSI_DISABLE) { + config_l2_da_perfect_inverse_match(osi_core, OSI_PFT_MATCH); + } +#else + value = osi_readla(osi_core, ((nveu8_t *)osi_core->base + MAC_PKT_FILTER_REG)); + value &= ~MAC_PFR_DAIF; + osi_writela(osi_core, value, ((nveu8_t *)osi_core->base + MAC_PKT_FILTER_REG)); + +#endif /* !OSI_STRIPPED_LIB */ + + return ret; +} + +nve32_t 
hw_config_l3_l4_filter_enable(struct osi_core_priv_data *const osi_core, + const nveu32_t filter_enb_dis) +{ + nveu32_t value = 0U; + void *base = osi_core->base; + nve32_t ret = 0; + + /* validate filter_enb_dis argument */ + if ((filter_enb_dis != OSI_ENABLE) && (filter_enb_dis != OSI_DISABLE)) { + OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + "Invalid filter_enb_dis value\n", + filter_enb_dis); + ret = -1; + goto fail; + } + + value = osi_readla(osi_core, ((nveu8_t *)base + MAC_PKT_FILTER_REG)); + value &= ~(MAC_PFR_IPFE); + value |= ((filter_enb_dis << MAC_PFR_IPFE_SHIFT) & MAC_PFR_IPFE); + osi_writela(osi_core, value, ((nveu8_t *)base + MAC_PKT_FILTER_REG)); +fail: + return ret; +} /** * @brief hw_est_read - indirect read the GCL to Software own list * (SWOL) * - * @param[in] base: MAC base IOVA address. + * @param[in] osi_core: OSI core private data structure. * @param[in] addr_val: Address offset for indirect write. * @param[in] data: Data to be written at offset. * @param[in] gcla: Gate Control List Address, 0 for ETS register. @@ -53,8 +637,7 @@ static inline nve32_t hw_est_read(struct osi_core_priv_data *osi_core, nve32_t ret; const nveu32_t MTL_EST_GCL_CONTROL[MAX_MAC_IP_TYPES] = { EQOS_MTL_EST_GCL_CONTROL, MGBE_MTL_EST_GCL_CONTROL}; - const nveu32_t MTL_EST_DATA[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_DATA, - MGBE_MTL_EST_DATA}; + const nveu32_t MTL_EST_DATA[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_DATA, MGBE_MTL_EST_DATA}; *data = 0U; val &= ~MTL_EST_ADDR_MASK; @@ -94,6 +677,7 @@ static inline nve32_t hw_est_read(struct osi_core_priv_data *osi_core, * * @param[in] osi_core: OSI core private data structure. * @param[in] est: Configuration input argument. + * @param[in] btr: Base time register value. * @param[in] mac: MAC index * * @note MAC should be init and started. see osi_start_mac() @@ -101,11 +685,11 @@ static inline nve32_t hw_est_read(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, - struct osi_est_config *const est, - const nveu32_t *btr, nveu32_t mac) +static nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, + struct osi_est_config *const est, + const nveu32_t *btr, nveu32_t mac) { - const struct core_local *l_core = (struct core_local *)osi_core; + const struct core_local *l_core = (struct core_local *)(void *)osi_core; const nveu32_t PTP_CYCLE_8[MAX_MAC_IP_TYPES] = {EQOS_8PTP_CYCLE, MGBE_8PTP_CYCLE}; const nveu32_t MTL_EST_CONTROL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL, @@ -129,7 +713,7 @@ nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, nveu32_t bunk = 0U; nveu32_t est_status; nveu64_t old_btr, old_ctr; - nve32_t ret; + nve32_t ret = 0; nveu32_t val = 0U; nveu64_t rem = 0U; const struct est_read hw_read_arr[4] = { @@ -138,11 +722,50 @@ nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, {&ctr_l, MTL_EST_CTR_LOW[mac]}, {&ctr_h, MTL_EST_CTR_HIGH[mac]}}; + if (est->en_dis > OSI_ENABLE) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "input argument en_dis value\n", + (nveul64_t)est->en_dis); + ret = -1; + goto done; + } + if (est->llr > l_core->gcl_dep) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "input argument more than GCL depth\n", (nveul64_t)est->llr); - return -1; + ret = -1; + goto done; + } + + /* 24 bit configure time in GCL + 7) */ + if (est->ter > 0x7FFFFFFFU) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "invalid TER value\n", + (nveul64_t)est->ter); + ret = -1; + goto done; + } + + /* nenosec register value can't be more than 10^9 nsec */ + if ((est->ctr[0] > OSI_NSEC_PER_SEC) || + (est->btr[0] > OSI_NSEC_PER_SEC) || + (est->ctr[1] > 0xFFU)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "input argument CTR/BTR nsec is invalid\n", + 0ULL); + ret = -1; + goto done; + } + + /* if btr + offset is more than limit */ + if ((est->btr[0] > (OSI_NSEC_PER_SEC - est->btr_offset[0])) || + 
(est->btr[1] > (UINT_MAX - est->btr_offset[1]))) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "input argument BTR offset is invalid\n", + 0ULL); + ret = -1; + goto done; } ctr = ((nveu64_t)est->ctr[1] * OSI_NSEC_PER_SEC) + est->ctr[0]; @@ -155,12 +778,13 @@ nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, ((ctr - sum_tin) >= PTP_CYCLE_8[mac])) { continue; } else if (((ctr - sum_ti) != 0U) && - ((ctr - sum_ti) < PTP_CYCLE_8[mac])) { + ((ctr - sum_ti) < PTP_CYCLE_8[mac])) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CTR issue due to trancate\n", (nveul64_t)i); - return -1; + ret = -1; + goto done; } else { //do nothing } @@ -171,16 +795,17 @@ nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "validation of GCL entry failed\n", (nveul64_t)i); - return -1; + ret = -1; + goto done; } /* Check for BTR in case of new ETS while current GCL enabled */ - val = osi_readla(osi_core, - (nveu8_t *)osi_core->base + + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + MTL_EST_CONTROL[mac]); if ((val & MTL_EST_CONTROL_EEST) != MTL_EST_CONTROL_EEST) { - return 0; + ret = 0; + goto done; } /* Read EST_STATUS for bunk */ @@ -200,7 +825,7 @@ nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Reading failed for index\n", (nveul64_t)i); - return ret; + goto done; } } @@ -211,18 +836,1000 @@ nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, if ((rem != OSI_NONE) && (rem < PTP_CYCLE_8[mac])) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid BTR", (nveul64_t)rem); - return -1; + ret = -1; + goto done; } } else if (btr_new > old_btr) { rem = (btr_new - old_btr) % old_ctr; if ((rem != OSI_NONE) && (rem < PTP_CYCLE_8[mac])) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid BTR", (nveul64_t)rem); - return -1; + ret = -1; + goto done; } } else { // Nothing to do } - return 0; +done: + return ret; +} + 
+/** + * @brief hw_est_write - indirect write the GCL to Software own list + * (SWOL) + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] addr_val: Address offset for indirect write. + * @param[in] data: Data to be written at offset. + * @param[in] gcla: Gate Control List Address, 0 for ETS register. + * 1 for GCL memory. + * + * @note MAC should be init and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. + */ +static nve32_t hw_est_write(struct osi_core_priv_data *osi_core, + nveu32_t addr_val, nveu32_t data, + nveu32_t gcla) +{ + nve32_t retry = 1000; + nveu32_t val = 0x0; + nve32_t ret = 0; + const nveu32_t MTL_EST_DATA[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_DATA, + MGBE_MTL_EST_DATA}; + const nveu32_t MTL_EST_GCL_CONTROL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_GCL_CONTROL, + MGBE_MTL_EST_GCL_CONTROL}; + + osi_writela(osi_core, data, (nveu8_t *)osi_core->base + + MTL_EST_DATA[osi_core->mac]); + + val &= ~MTL_EST_ADDR_MASK; + val |= (gcla == 1U) ? 0x0U : MTL_EST_GCRR; + val |= MTL_EST_SRWO; + val |= addr_val; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MTL_EST_GCL_CONTROL[osi_core->mac]); + + while (--retry > 0) { + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_EST_GCL_CONTROL[osi_core->mac]); + if ((val & MTL_EST_SRWO) == MTL_EST_SRWO) { + osi_core->osd_ops.udelay(OSI_DELAY_1US); + continue; + } + + break; + } + + if (((val & MTL_EST_ERR0) == MTL_EST_ERR0) || + (retry <= 0)) { + ret = -1; + } + + return ret; +} + +/** + * @brief hw_config_est - Read Setting for GCL from input and update + * registers. + * + * Algorithm: + * 1) Write TER, LLR and EST control register + * 2) Update GCL to sw own GCL (MTL_EST_Status bit SWOL will tell which is + * owned by SW) and store which GCL is in use currently in sw. 
+ * 3) TODO set DBGB and DBGM for debugging + * 4) EST_data and GCRR to 1, update entry sno in ADDR and put data at + * est_gcl_data enable GCL MTL_EST_SSWL and wait for self clear or use + * SWLC in MTL_EST_Status. Please note new GCL will be pushed for each entry. + * 5) Configure btr. Update btr based on current time (current time + * should be updated based on PTP by this time) + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] est: EST configuration input argument. + * + * @note MAC should be init and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. + */ +nve32_t hw_config_est(struct osi_core_priv_data *const osi_core, + struct osi_est_config *const est) +{ + nveu32_t btr[2] = {0}; + nveu32_t val = 0x0; + void *base = osi_core->base; + nveu32_t i; + nve32_t ret = 0; + nveu32_t addr = 0x0; + const nveu32_t MTL_EST_CONTROL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL, + MGBE_MTL_EST_CONTROL}; + const nveu32_t MTL_EST_BTR_LOW[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_BTR_LOW, + MGBE_MTL_EST_BTR_LOW}; + const nveu32_t MTL_EST_BTR_HIGH[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_BTR_HIGH, + MGBE_MTL_EST_BTR_HIGH}; + const nveu32_t MTL_EST_CTR_LOW[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTR_LOW, + MGBE_MTL_EST_CTR_LOW}; + const nveu32_t MTL_EST_CTR_HIGH[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTR_HIGH, + MGBE_MTL_EST_CTR_HIGH}; + const nveu32_t MTL_EST_TER[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_TER, + MGBE_MTL_EST_TER}; + const nveu32_t MTL_EST_LLR[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_LLR, + MGBE_MTL_EST_LLR}; + + if ((osi_core->hw_feature != OSI_NULL) && + (osi_core->hw_feature->est_sel == OSI_DISABLE)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "EST not supported in HW\n", 0ULL); + ret = -1; + goto done; + } + + if (est->en_dis == OSI_DISABLE) { + val = osi_readla(osi_core, (nveu8_t *)base + + MTL_EST_CONTROL[osi_core->mac]); + val &= ~MTL_EST_EEST; + osi_writela(osi_core, val, (nveu8_t *)base + + MTL_EST_CONTROL[osi_core->mac]); + 
+ ret = 0; + } else { + btr[0] = est->btr[0]; + btr[1] = est->btr[1]; + if ((btr[0] == 0U) && (btr[1] == 0U)) { + common_get_systime_from_mac(osi_core->base, + osi_core->mac, + &btr[1], &btr[0]); + } + + if (gcl_validate(osi_core, est, btr, osi_core->mac) < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL validation failed\n", 0LL); + ret = -1; + goto done; + } + + ret = hw_est_write(osi_core, MTL_EST_CTR_LOW[osi_core->mac], est->ctr[0], 0); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL CTR[0] failed\n", 0LL); + goto done; + } + /* check for est->ctr[i] not more than FF, TODO as per hw config + * parameter we can have max 0x3 as this value in sec */ + est->ctr[1] &= MTL_EST_CTR_HIGH_MAX; + ret = hw_est_write(osi_core, MTL_EST_CTR_HIGH[osi_core->mac], est->ctr[1], 0); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL CTR[1] failed\n", 0LL); + goto done; + } + + ret = hw_est_write(osi_core, MTL_EST_TER[osi_core->mac], est->ter, 0); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL TER failed\n", 0LL); + goto done; + } + + ret = hw_est_write(osi_core, MTL_EST_LLR[osi_core->mac], est->llr, 0); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL LLR failed\n", 0LL); + goto done; + } + + /* Write GCL table */ + for (i = 0U; i < est->llr; i++) { + addr = i; + addr = addr << MTL_EST_ADDR_SHIFT; + addr &= MTL_EST_ADDR_MASK; + ret = hw_est_write(osi_core, addr, est->gcl[i], 1); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL enties write failed\n", + (nveul64_t)i); + goto done; + } + } + + /* Write parameters */ + ret = hw_est_write(osi_core, MTL_EST_BTR_LOW[osi_core->mac], + btr[0] + est->btr_offset[0], OSI_DISABLE); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL BTR[0] failed\n", + (btr[0] + est->btr_offset[0])); + goto done; + } + + ret = hw_est_write(osi_core, MTL_EST_BTR_HIGH[osi_core->mac], + btr[1] + 
est->btr_offset[1], OSI_DISABLE); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "GCL BTR[1] failed\n", + (btr[1] + est->btr_offset[1])); + goto done; + } + + val = osi_readla(osi_core, (nveu8_t *)base + + MTL_EST_CONTROL[osi_core->mac]); + /* Store table */ + val |= MTL_EST_SSWL; + val |= MTL_EST_EEST; + val |= MTL_EST_QHLBF; + osi_writela(osi_core, val, (nveu8_t *)base + + MTL_EST_CONTROL[osi_core->mac]); + } +done: + return ret; +} + +/** + * @brief hw_config_fpe - Read Setting for preemption and express for TC + * and update registers. + * + * Algorithm: + * 1) Check for TC enable and TC has masked for setting to preemptable. + * 2) update FPE control status register + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] fpe: FPE configuration input argument. + * + * @note MAC should be init and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. + */ +nve32_t hw_config_fpe(struct osi_core_priv_data *const osi_core, + struct osi_fpe_config *const fpe) +{ + nveu32_t i = 0U; + nveu32_t val = 0U; + nveu32_t temp = 0U, temp1 = 0U; + nveu32_t temp_shift = 0U; + nve32_t ret = 0; + const nveu32_t MTL_FPE_CTS[MAX_MAC_IP_TYPES] = {EQOS_MTL_FPE_CTS, + MGBE_MTL_FPE_CTS}; + const nveu32_t MAC_FPE_CTS[MAX_MAC_IP_TYPES] = {EQOS_MAC_FPE_CTS, + MGBE_MAC_FPE_CTS}; + const nveu32_t max_number_queue[MAX_MAC_IP_TYPES] = {OSI_EQOS_MAX_NUM_QUEUES, + OSI_MGBE_MAX_NUM_QUEUES}; + const nveu32_t MAC_RQC1R[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R, + MGBE_MAC_RQC1R}; + const nveu32_t MAC_RQC1R_RQ[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R_FPRQ, + MGBE_MAC_RQC1R_RQ}; + const nveu32_t MAC_RQC1R_RQ_SHIFT[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R_FPRQ_SHIFT, + MGBE_MAC_RQC1R_RQ_SHIFT}; + const nveu32_t MTL_FPE_ADV[MAX_MAC_IP_TYPES] = {EQOS_MTL_FPE_ADV, + MGBE_MTL_FPE_ADV}; + + if ((osi_core->hw_feature != OSI_NULL) && + (osi_core->hw_feature->fpe_sel == OSI_DISABLE)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "FPE not 
supported in HW\n", 0ULL); + ret = -1; + goto error; + } + + /* Only 8 TC */ + if (fpe->tx_queue_preemption_enable > 0xFFU) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "FPE input tx_queue_preemption_enable is invalid\n", + (nveul64_t)fpe->tx_queue_preemption_enable); + ret = -1; + goto error; + } + + if (osi_core->mac == OSI_MAC_HW_MGBE) { +#ifdef MACSEC_SUPPORT + osi_lock_irq_enabled(&osi_core->macsec_fpe_lock); + /* MACSEC and FPE cannot coexist on MGBE refer bug 3484034 */ + if (osi_core->is_macsec_enabled == OSI_ENABLE) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "FPE and MACSEC cannot co-exist\n", 0ULL); + ret = -1; + goto done; + } +#endif /* MACSEC_SUPPORT */ + } + + osi_core->fpe_ready = OSI_DISABLE; + + if (((fpe->tx_queue_preemption_enable << MTL_FPE_CTS_PEC_SHIFT) & + MTL_FPE_CTS_PEC) == OSI_DISABLE) { + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_FPE_CTS[osi_core->mac]); + val &= ~MTL_FPE_CTS_PEC; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MTL_FPE_CTS[osi_core->mac]); + + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MAC_FPE_CTS[osi_core->mac]); + val &= ~MAC_FPE_CTS_EFPE; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MAC_FPE_CTS[osi_core->mac]); + + if (osi_core->mac == OSI_MAC_HW_MGBE) { +#ifdef MACSEC_SUPPORT + osi_core->is_fpe_enabled = OSI_DISABLE; +#endif /* MACSEC_SUPPORT */ + } + ret = 0; + } else { + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_FPE_CTS[osi_core->mac]); + val &= ~MTL_FPE_CTS_PEC; + for (i = 0U; i < OSI_MAX_TC_NUM; i++) { + /* max 8 bit for this structure fot TC/TXQ. Set the TC for express or + * preemption. Default is express for a TC. 
DWCXG_NUM_TC = 8 */ + temp = OSI_BIT(i); + if ((fpe->tx_queue_preemption_enable & temp) == temp) { + temp_shift = i; + temp_shift += MTL_FPE_CTS_PEC_SHIFT; + /* set queue for preemptable */ + if (temp_shift < MTL_FPE_CTS_PEC_MAX_SHIFT) { + temp1 = OSI_ENABLE; + temp1 = temp1 << temp_shift; + val |= temp1; + } else { + /* Do nothing */ + } + } + } + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MTL_FPE_CTS[osi_core->mac]); + + if ((fpe->rq == 0x0U) || (fpe->rq >= max_number_queue[osi_core->mac])) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "FPE init failed due to wrong RQ\n", fpe->rq); + ret = -1; + goto done; + } + + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MAC_RQC1R[osi_core->mac]); + val &= ~MAC_RQC1R_RQ[osi_core->mac]; + temp = fpe->rq; + temp = temp << MAC_RQC1R_RQ_SHIFT[osi_core->mac]; + temp = (temp & MAC_RQC1R_RQ[osi_core->mac]); + val |= temp; + osi_core->residual_queue = fpe->rq; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MAC_RQC1R[osi_core->mac]); + + if (osi_core->mac == OSI_MAC_HW_MGBE) { + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MGBE_MAC_RQC4R); + val &= ~MGBE_MAC_RQC4R_PMCBCQ; + temp = fpe->rq; + temp = temp << MGBE_MAC_RQC4R_PMCBCQ_SHIFT; + temp = (temp & MGBE_MAC_RQC4R_PMCBCQ); + val |= temp; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MGBE_MAC_RQC4R); + } + /* initiate SVER for SMD-V and SMD-R */ + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_FPE_CTS[osi_core->mac]); + val |= MAC_FPE_CTS_SVER; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MAC_FPE_CTS[osi_core->mac]); + + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_FPE_ADV[osi_core->mac]); + val &= ~MTL_FPE_ADV_HADV_MASK; + //(minimum_fragment_size +IPG/EIPG + Preamble) *.8 ~98ns for 10G + val |= MTL_FPE_ADV_HADV_VAL; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MTL_FPE_ADV[osi_core->mac]); + + if (osi_core->mac == OSI_MAC_HW_MGBE) { +#ifdef MACSEC_SUPPORT + 
osi_core->is_fpe_enabled = OSI_ENABLE; +#endif /* MACSEC_SUPPORT */ + } + } +done: + + if (osi_core->mac == OSI_MAC_HW_MGBE) { +#ifdef MACSEC_SUPPORT + osi_unlock_irq_enabled(&osi_core->macsec_fpe_lock); +#endif /* MACSEC_SUPPORT */ + } + +error: + return ret; +} + +/** + * @brief enable_mtl_interrupts - Enable MTL interrupts + * + * Algorithm: enable MTL interrupts for EST + * + * @param[in] osi_core: OSI core private data structure. + * + * @note MAC should be init and started. see osi_start_mac() + */ +static inline void enable_mtl_interrupts(struct osi_core_priv_data *osi_core) +{ + nveu32_t mtl_est_ir = OSI_DISABLE; + const nveu32_t MTL_EST_ITRE[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_ITRE, + MGBE_MTL_EST_ITRE}; + + mtl_est_ir = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_EST_ITRE[osi_core->mac]); + /* enable only MTL interrupt related to + * Constant Gate Control Error + * Head-Of-Line Blocking due to Scheduling + * Head-Of-Line Blocking due to Frame Size + * BTR Error + * Switch to S/W owned list Complete + */ + mtl_est_ir |= (MTL_EST_ITRE_CGCE | MTL_EST_ITRE_IEHS | + MTL_EST_ITRE_IEHF | MTL_EST_ITRE_IEBE | + MTL_EST_ITRE_IECC); + osi_writela(osi_core, mtl_est_ir, (nveu8_t *)osi_core->base + + MTL_EST_ITRE[osi_core->mac]); +} + +/** + * @brief enable_fpe_interrupts - Enable FPE interrupts + * + * Algorithm: enable FPE interrupts + * + * @param[in] osi_core: OSI core private data structure. + * + * @note MAC should be init and started. 
see osi_start_mac() + */ +static inline void enable_fpe_interrupts(struct osi_core_priv_data *osi_core) +{ + nveu32_t value = OSI_DISABLE; + const nveu32_t MAC_IER[MAX_MAC_IP_TYPES] = {EQOS_MAC_IMR, + MGBE_MAC_IER}; + const nveu32_t IMR_FPEIE[MAX_MAC_IP_TYPES] = {EQOS_IMR_FPEIE, + MGBE_IMR_FPEIE}; + + /* Read MAC IER Register and enable Frame Preemption Interrupt + * Enable */ + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MAC_IER[osi_core->mac]); + value |= IMR_FPEIE[osi_core->mac]; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + MAC_IER[osi_core->mac]); +} + +/** + * @brief save_gcl_params - save GCL configs in local core structure + * + * @param[in] osi_core: OSI core private data structure. + * + * @note MAC should be init and started. see osi_start_mac() + */ +static inline void save_gcl_params(struct osi_core_priv_data *osi_core) +{ + struct core_local *l_core = (struct core_local *)(void *)osi_core; + const nveu32_t gcl_widhth[4] = {0, OSI_MAX_24BITS, OSI_MAX_28BITS, + OSI_MAX_32BITS}; + const nveu32_t gcl_ti_mask[4] = {0, OSI_MASK_16BITS, OSI_MASK_20BITS, + OSI_MASK_24BITS}; + const nveu32_t gcl_depthth[6] = {0, OSI_GCL_SIZE_64, OSI_GCL_SIZE_128, + OSI_GCL_SIZE_256, OSI_GCL_SIZE_512, + OSI_GCL_SIZE_1024}; + + if ((osi_core->hw_feature->gcl_width == 0U) || + (osi_core->hw_feature->gcl_width > 3U)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Wrong HW feature GCL width\n", + (nveul64_t)osi_core->hw_feature->gcl_width); + } else { + l_core->gcl_width_val = + gcl_widhth[osi_core->hw_feature->gcl_width]; + l_core->ti_mask = gcl_ti_mask[osi_core->hw_feature->gcl_width]; + } + + if ((osi_core->hw_feature->gcl_depth == 0U) || + (osi_core->hw_feature->gcl_depth > 5U)) { + /* Do Nothing */ + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Wrong HW feature GCL depth\n", + (nveul64_t)osi_core->hw_feature->gcl_depth); + } else { + l_core->gcl_dep = gcl_depthth[osi_core->hw_feature->gcl_depth]; + } +} + +/** + * @brief 
hw_tsn_init - initialize TSN feature + * + * Algorithm: + * 1) If hardware support EST, + * a) Set default EST configuration + * b) Set enable interrupts + * 2) If hardware supports FPE + * a) Set default FPE configuration + * b) enable interrupts + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] est_sel: EST HW support present or not + * @param[in] fpe_sel: FPE HW support present or not + * + * @note MAC should be init and started. see osi_start_mac() + */ +void hw_tsn_init(struct osi_core_priv_data *osi_core, + nveu32_t est_sel, nveu32_t fpe_sel) +{ + nveu32_t val = 0x0; + nveu32_t temp = 0U; + const nveu32_t MTL_EST_CONTROL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL, + MGBE_MTL_EST_CONTROL}; + const nveu32_t MTL_EST_CONTROL_PTOV[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_PTOV, + MGBE_MTL_EST_CONTROL_PTOV}; + const nveu32_t MTL_EST_PTOV_RECOMMEND[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_PTOV_RECOMMEND, + MGBE_MTL_EST_PTOV_RECOMMEND}; + const nveu32_t MTL_EST_CONTROL_PTOV_SHIFT[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_PTOV_SHIFT, + MGBE_MTL_EST_CONTROL_PTOV_SHIFT}; + const nveu32_t MTL_EST_CONTROL_CTOV[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_CTOV, + MGBE_MTL_EST_CONTROL_CTOV}; + const nveu32_t MTL_EST_CTOV_RECOMMEND[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CTOV_RECOMMEND, + MGBE_MTL_EST_CTOV_RECOMMEND}; + const nveu32_t MTL_EST_CONTROL_CTOV_SHIFT[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_CTOV_SHIFT, + MGBE_MTL_EST_CONTROL_CTOV_SHIFT}; + const nveu32_t MTL_EST_CONTROL_LCSE[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_LCSE, + MGBE_MTL_EST_CONTROL_LCSE}; + const nveu32_t MTL_EST_CONTROL_LCSE_VAL[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_LCSE_VAL, + MGBE_MTL_EST_CONTROL_LCSE_VAL}; + const nveu32_t MTL_EST_CONTROL_DDBF[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_CONTROL_DDBF, + MGBE_MTL_EST_CONTROL_DDBF}; + const nveu32_t MTL_EST_OVERHEAD[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_OVERHEAD, + MGBE_MTL_EST_OVERHEAD}; + const nveu32_t MTL_EST_OVERHEAD_OVHD[MAX_MAC_IP_TYPES] = 
{EQOS_MTL_EST_OVERHEAD_OVHD, + MGBE_MTL_EST_OVERHEAD_OVHD}; + const nveu32_t MTL_EST_OVERHEAD_RECOMMEND[MAX_MAC_IP_TYPES] = {EQOS_MTL_EST_OVERHEAD_RECOMMEND, + MGBE_MTL_EST_OVERHEAD_RECOMMEND}; + const nveu32_t MAC_RQC1R[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R, + MGBE_MAC_RQC1R}; + const nveu32_t MAC_RQC1R_RQ[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R_FPRQ, + MGBE_MAC_RQC1R_RQ}; + const nveu32_t MAC_RQC1R_RQ_SHIFT[MAX_MAC_IP_TYPES] = {EQOS_MAC_RQC1R_FPRQ_SHIFT, + MGBE_MAC_RQC1R_RQ_SHIFT}; + + if (est_sel == OSI_ENABLE) { + save_gcl_params(osi_core); + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_EST_CONTROL[osi_core->mac]); + + /* + * PTOV PTP clock period * 6 + * dual-port RAM based asynchronous FIFO controllers or + * Single-port RAM based synchronous FIFO controllers + * CTOV 96 x Tx clock period + * : + * : + * set other default value + */ + val &= ~MTL_EST_CONTROL_PTOV[osi_core->mac]; + temp = MTL_EST_PTOV_RECOMMEND[osi_core->mac]; + temp = temp << MTL_EST_CONTROL_PTOV_SHIFT[osi_core->mac]; + val |= temp; + + val &= ~MTL_EST_CONTROL_CTOV[osi_core->mac]; + temp = MTL_EST_CTOV_RECOMMEND[osi_core->mac]; + temp = temp << MTL_EST_CONTROL_CTOV_SHIFT[osi_core->mac]; + val |= temp; + + /*Loop Count to report Scheduling Error*/ + val &= ~MTL_EST_CONTROL_LCSE[osi_core->mac]; + val |= MTL_EST_CONTROL_LCSE_VAL[osi_core->mac]; + + if (osi_core->mac == OSI_MAC_HW_EQOS) { + val &= ~EQOS_MTL_EST_CONTROL_DFBS; + } + val &= ~MTL_EST_CONTROL_DDBF[osi_core->mac]; + val |= MTL_EST_CONTROL_DDBF[osi_core->mac]; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MTL_EST_CONTROL[osi_core->mac]); + + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MTL_EST_OVERHEAD[osi_core->mac]); + val &= ~MTL_EST_OVERHEAD_OVHD[osi_core->mac]; + /* As per hardware programming info */ + val |= MTL_EST_OVERHEAD_RECOMMEND[osi_core->mac]; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MTL_EST_OVERHEAD[osi_core->mac]); + + enable_mtl_interrupts(osi_core); + } + + if 
(fpe_sel == OSI_ENABLE) { + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MAC_RQC1R[osi_core->mac]); + val &= ~MAC_RQC1R_RQ[osi_core->mac]; + temp = osi_core->residual_queue; + temp = temp << MAC_RQC1R_RQ_SHIFT[osi_core->mac]; + temp = (temp & MAC_RQC1R_RQ[osi_core->mac]); + val |= temp; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MAC_RQC1R[osi_core->mac]); + + if (osi_core->mac == OSI_MAC_HW_MGBE) { + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MGBE_MAC_RQC4R); + val &= ~MGBE_MAC_RQC4R_PMCBCQ; + temp = osi_core->residual_queue; + temp = temp << MGBE_MAC_RQC4R_PMCBCQ_SHIFT; + temp = (temp & MGBE_MAC_RQC4R_PMCBCQ); + val |= temp; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + MGBE_MAC_RQC4R); + } + + enable_fpe_interrupts(osi_core); + } + + /* CBS setting for TC or TXQ for default configuration + user application should use IOCTL to set CBS as per requirement + */ +} + +#ifdef HSI_SUPPORT +/** + * @brief hsi_common_error_inject + * + * Algorithm: + * - For macsec HSI: trigger interrupt using MACSEC_*_INTERRUPT_SET_0 register + * - For mmc counter based: trigger interrupt by incrementing count by threshold value + * - For rest: Directly set the error detected as there is no other mean to induce error + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] error_code: Ethernet HSI error code + * + * @note MAC should be init and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +nve32_t hsi_common_error_inject(struct osi_core_priv_data *osi_core, + nveu32_t error_code) +{ + nve32_t ret = 0; + + switch (error_code) { + case OSI_INBOUND_BUS_CRC_ERR: + osi_core->hsi.inject_crc_err_count = + osi_update_stats_counter(osi_core->hsi.inject_crc_err_count, + osi_core->hsi.err_count_threshold); + break; + case OSI_RECEIVE_CHECKSUM_ERR: + osi_core->hsi.inject_udp_err_count = + osi_update_stats_counter(osi_core->hsi.inject_udp_err_count, + osi_core->hsi.err_count_threshold); + break; + case OSI_MACSEC_RX_CRC_ERR: + osi_writela(osi_core, MACSEC_RX_MAC_CRC_ERROR, + (nveu8_t *)osi_core->macsec_base + + MACSEC_RX_ISR_SET); + break; + case OSI_MACSEC_TX_CRC_ERR: + osi_writela(osi_core, MACSEC_TX_MAC_CRC_ERROR, + (nveu8_t *)osi_core->macsec_base + + MACSEC_TX_ISR_SET); + break; + case OSI_MACSEC_RX_ICV_ERR: + osi_writela(osi_core, MACSEC_RX_ICV_ERROR, + (nveu8_t *)osi_core->macsec_base + + MACSEC_RX_ISR_SET); + break; + case OSI_MACSEC_REG_VIOL_ERR: + osi_writela(osi_core, MACSEC_SECURE_REG_VIOL, + (nveu8_t *)osi_core->macsec_base + + MACSEC_COMMON_ISR_SET); + break; + case OSI_TX_FRAME_ERR: + osi_core->hsi.report_count_err[TX_FRAME_ERR_IDX] = OSI_ENABLE; + osi_core->hsi.err_code[TX_FRAME_ERR_IDX] = OSI_TX_FRAME_ERR; + osi_core->hsi.report_err = OSI_ENABLE; + break; + case OSI_PCS_AUTONEG_ERR: + osi_core->hsi.err_code[AUTONEG_ERR_IDX] = OSI_PCS_AUTONEG_ERR; + osi_core->hsi.report_err = OSI_ENABLE; + osi_core->hsi.report_count_err[AUTONEG_ERR_IDX] = OSI_ENABLE; + break; + case OSI_XPCS_WRITE_FAIL_ERR: + osi_core->hsi.err_code[XPCS_WRITE_FAIL_IDX] = OSI_XPCS_WRITE_FAIL_ERR; + osi_core->hsi.report_err = OSI_ENABLE; + osi_core->hsi.report_count_err[XPCS_WRITE_FAIL_IDX] = OSI_ENABLE; + break; + default: + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Invalid error code\n", (nveu32_t)error_code); + ret = -1; + break; + } + + return ret; +} +#endif + +/** + * @brief prepare_l3l4_ctr_reg - Prepare control register for L3L4 filters. 
+ * + * @note + * Algorithm: + * - This sequence is used to prepare L3L4 control register for SA and DA Port Number matching. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * @param[out] ctr_reg: Pointer to L3L4 CTR register value + * + * @note 1) MAC should be init and started. see osi_start_mac() + * + * @retval L3L4 CTR register value + */ +static void prepare_l3l4_ctr_reg(const struct osi_core_priv_data *const osi_core, + const struct osi_l3_l4_filter *const l3_l4, + nveu32_t *ctr_reg) +{ +#ifndef OSI_STRIPPED_LIB + nveu32_t dma_routing_enable = l3_l4->dma_routing_enable; + nveu32_t dst_addr_match = l3_l4->data.dst.addr_match; +#else + nveu32_t dma_routing_enable = OSI_TRUE; + nveu32_t dst_addr_match = OSI_TRUE; +#endif /* !OSI_STRIPPED_LIB */ + const nveu32_t dma_chan_en_shift[2] = { + EQOS_MAC_L3L4_CTR_DMCHEN_SHIFT, + MGBE_MAC_L3L4_CTR_DMCHEN_SHIFT + }; + nveu32_t value = 0U; + + /* set routing dma channel */ + value |= dma_routing_enable << (dma_chan_en_shift[osi_core->mac] & 0x1FU); + value |= l3_l4->dma_chan << MAC_L3L4_CTR_DMCHN_SHIFT; + + /* Enable L3 filters for IPv4 DESTINATION addr matching */ + value |= dst_addr_match << MAC_L3L4_CTR_L3DAM_SHIFT; + +#ifndef OSI_STRIPPED_LIB + /* Enable L3 filters for IPv4 DESTINATION addr INV matching */ + value |= l3_l4->data.dst.addr_match_inv << MAC_L3L4_CTR_L3DAIM_SHIFT; + + /* Enable L3 filters for IPv4 SOURCE addr matching */ + value |= (l3_l4->data.src.addr_match << MAC_L3L4_CTR_L3SAM_SHIFT) | + (l3_l4->data.src.addr_match_inv << MAC_L3L4_CTR_L3SAIM_SHIFT); + + /* Enable L4 filters for DESTINATION port No matching */ + value |= (l3_l4->data.dst.port_match << MAC_L3L4_CTR_L4DPM_SHIFT) | + (l3_l4->data.dst.port_match_inv << MAC_L3L4_CTR_L4DPIM_SHIFT); + + /* Enable L4 filters for SOURCE Port No matching */ + value |= (l3_l4->data.src.port_match << MAC_L3L4_CTR_L4SPM_SHIFT) | + (l3_l4->data.src.port_match_inv << 
MAC_L3L4_CTR_L4SPIM_SHIFT); + + /* set udp / tcp port matching bit (for l4) */ + value |= l3_l4->data.is_udp << MAC_L3L4_CTR_L4PEN_SHIFT; + + /* set ipv4 / ipv6 protocol matching bit (for l3) */ + value |= l3_l4->data.is_ipv6 << MAC_L3L4_CTR_L3PEN_SHIFT; +#endif /* !OSI_STRIPPED_LIB */ + + *ctr_reg = value; +} + +/** + * @brief prepare_l3_addr_registers - prepare register data for IPv4/IPv6 address filtering + * + * @note + * Algorithm: + * - Update IPv4/IPv6 source/destination address for L3 layer filtering. + * - For IPv4, both source/destination address can be configured but + * for IPv6, only one of the source/destination address can be configured. + * + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * @param[out] l3_addr1_reg: Pointer to L3 ADDR1 register value + * + * @note 1) MAC should be init and started. see osi_start_mac() + */ +static void prepare_l3_addr_registers(const struct osi_l3_l4_filter *const l3_l4, +#ifndef OSI_STRIPPED_LIB + nveu32_t *l3_addr0_reg, + nveu32_t *l3_addr2_reg, + nveu32_t *l3_addr3_reg, +#endif /* !OSI_STRIPPED_LIB */ + nveu32_t *l3_addr1_reg) +{ +#ifndef OSI_STRIPPED_LIB + if (l3_l4->data.is_ipv6 == OSI_TRUE) { + const nveu16_t *addr; + /* For IPv6, either source address or destination + * address only one of them can be enabled + */ + if (l3_l4->data.src.addr_match == OSI_TRUE) { + /* select src address only */ + addr = l3_l4->data.src.ip6_addr; + } else { + /* select dst address only */ + addr = l3_l4->data.dst.ip6_addr; + } + /* update Bits[31:0] of 128-bit IP addr */ + *l3_addr0_reg = addr[7] | ((nveu32_t)addr[6] << 16); + + /* update Bits[63:32] of 128-bit IP addr */ + *l3_addr1_reg = addr[5] | ((nveu32_t)addr[4] << 16); + + /* update Bits[95:64] of 128-bit IP addr */ + *l3_addr2_reg = addr[3] | ((nveu32_t)addr[2] << 16); + + /* update Bits[127:96] of 128-bit IP addr */ + *l3_addr3_reg = addr[1] | ((nveu32_t)addr[0] << 16); + } else { +#endif /* !OSI_STRIPPED_LIB */ + const nveu8_t *addr; + 
nveu32_t value; + +#ifndef OSI_STRIPPED_LIB + /* set source address */ + addr = l3_l4->data.src.ip4_addr; + value = addr[3]; + value |= (nveu32_t)addr[2] << 8; + value |= (nveu32_t)addr[1] << 16; + value |= (nveu32_t)addr[0] << 24; + *l3_addr0_reg = value; +#endif /* !OSI_STRIPPED_LIB */ + + /* set destination address */ + addr = l3_l4->data.dst.ip4_addr; + value = addr[3]; + value |= (nveu32_t)addr[2] << 8; + value |= (nveu32_t)addr[1] << 16; + value |= (nveu32_t)addr[0] << 24; + *l3_addr1_reg = value; +#ifndef OSI_STRIPPED_LIB + } +#endif /* !OSI_STRIPPED_LIB */ +} + +#ifndef OSI_STRIPPED_LIB +/** + * @brief prepare_l4_port_register - program source and destination port number + * + * @note + * Algorithm: + * - Program l4 address register with source and destination port numbers. + * + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * @param[out] l4_addr_reg: Pointer to L3 ADDR0 register value + * + * @note 1) MAC should be init and started. see osi_start_mac() + * 3) DCS bits should be enabled in RXQ to DMA mapping register + */ +static void prepare_l4_port_register(const struct osi_l3_l4_filter *const l3_l4, + nveu32_t *l4_addr_reg) +{ + nveu32_t value = 0U; + + /* set source port */ + value |= ((nveu32_t)l3_l4->data.src.port_no + & MGBE_MAC_L4_ADDR_SP_MASK); + + /* set destination port */ + value |= (((nveu32_t)l3_l4->data.dst.port_no << + MGBE_MAC_L4_ADDR_DP_SHIFT) & MGBE_MAC_L4_ADDR_DP_MASK); + + *l4_addr_reg = value; +} +#endif /* !OSI_STRIPPED_LIB */ + +/** + * @brief prepare_l3l4_registers - function to prepare l3l4 registers + * + * @note + * Algorithm: + * - If filter to be enabled, + * - Prepare l3 ip address registers using prepare_l3_addr_registers(). + * - Prepare l4 port register using prepare_l4_port_register(). + * - Prepare l3l4 control register using prepare_l3l4_ctr_reg(). + * + * @param[in] osi_core: OSI core private data structure. 
+ * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * @param[out] l3_addr1_reg: Pointer to L3 ADDR1 register value + * @param[out] ctr_reg: Pointer to L3L4 CTR register value + * + * @note 1) MAC should be init and started. see osi_start_mac() + * 2) osi_core->osd should be populated + * 3) DCS bits should be enabled in RXQ to DMA mapping register + */ +void prepare_l3l4_registers(const struct osi_core_priv_data *const osi_core, + const struct osi_l3_l4_filter *const l3_l4, +#ifndef OSI_STRIPPED_LIB + nveu32_t *l3_addr0_reg, + nveu32_t *l3_addr2_reg, + nveu32_t *l3_addr3_reg, + nveu32_t *l4_addr_reg, +#endif /* !OSI_STRIPPED_LIB */ + nveu32_t *l3_addr1_reg, + nveu32_t *ctr_reg) +{ + /* prepare regiser data if filter to be enabled */ + if (l3_l4->filter_enb_dis == OSI_TRUE) { + /* prepare l3 filter ip address register data */ + prepare_l3_addr_registers(l3_l4, +#ifndef OSI_STRIPPED_LIB + l3_addr0_reg, + l3_addr2_reg, + l3_addr3_reg, +#endif /* !OSI_STRIPPED_LIB */ + l3_addr1_reg); + +#ifndef OSI_STRIPPED_LIB + /* prepare l4 filter port register data */ + prepare_l4_port_register(l3_l4, l4_addr_reg); +#endif /* !OSI_STRIPPED_LIB */ + + /* prepare control register data */ + prepare_l3l4_ctr_reg(osi_core, l3_l4, ctr_reg); + } +} + +/** + * @brief hw_validate_avb_input- validate input arguments + * + * Algorithm: + * 1) Check if idle slope is valid + * 2) Check if send slope is valid + * 3) Check if hi credit is valid + * 4) Check if low credit is valid + * + * @param[in] osi_core: osi core priv data structure + * @param[in] avb: structure having configuration for avb algorithm + * + * @note 1) MAC should be init and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +nve32_t hw_validate_avb_input(struct osi_core_priv_data *const osi_core, + const struct osi_core_avb_algorithm *const avb) +{ + nve32_t ret = 0; + nveu32_t ETS_QW_ISCQW_MASK[MAX_MAC_IP_TYPES] = {EQOS_MTL_TXQ_ETS_QW_ISCQW_MASK, + MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK}; + nveu32_t ETS_SSCR_SSC_MASK[MAX_MAC_IP_TYPES] = {EQOS_MTL_TXQ_ETS_SSCR_SSC_MASK, + MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK}; + nveu32_t ETS_HC_BOUND = 0x8000000U; + nveu32_t ETS_LC_BOUND = 0xF8000000U; + nveu32_t mac = osi_core->mac; + + if (avb->idle_slope > ETS_QW_ISCQW_MASK[mac]) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid idle_slope\n", + (nveul64_t)avb->idle_slope); + ret = -1; + goto fail; + } + if (avb->send_slope > ETS_SSCR_SSC_MASK[mac]) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid send_slope\n", + (nveul64_t)avb->send_slope); + ret = -1; + goto fail; + } + if (avb->hi_credit > ETS_HC_BOUND) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid hi credit\n", + (nveul64_t)avb->hi_credit); + ret = -1; + goto fail; + } + if ((avb->low_credit < ETS_LC_BOUND) && + (avb->low_credit != 0U)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid low credit\n", + (nveul64_t)avb->low_credit); + ret = -1; + goto fail; + } +fail: + return ret; } diff --git a/kernel/nvethernetrm/osi/core/core_common.h b/kernel/nvethernetrm/osi/core/core_common.h index 81b69a7954..c74b579e50 100644 --- a/kernel/nvethernetrm/osi/core/core_common.h +++ b/kernel/nvethernetrm/osi/core/core_common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,11 +24,20 @@ #define INCLUDED_CORE_COMMON_H #include "core_local.h" + +#ifndef OSI_STRIPPED_LIB +#define MAC_PFR_PR OSI_BIT(0) +#define MAC_TCR_TSCFUPDT OSI_BIT(1) +#define MAC_TCR_TSCTRLSSR OSI_BIT(9) +#define MAC_PFR_PM OSI_BIT(4) +#endif /* !OSI_STRIPPED_LIB */ + +#define MTL_EST_ADDR_SHIFT 8 #define MTL_EST_ADDR_MASK (OSI_BIT(8) | OSI_BIT(9) | \ OSI_BIT(10) | OSI_BIT(11) | \ OSI_BIT(12) | OSI_BIT(13) | \ OSI_BIT(14) | OSI_BIT(15) | \ - OSI_BIT(16) | (17) | \ + OSI_BIT(16) | (17U) | \ OSI_BIT(18) | OSI_BIT(19)) #define MTL_EST_SRWO OSI_BIT(0) #define MTL_EST_R1W0 OSI_BIT(1) @@ -38,6 +47,75 @@ #define MTL_EST_ERR0 OSI_BIT(20) #define MTL_EST_CONTROL_EEST OSI_BIT(0) #define MTL_EST_STATUS_SWOL OSI_BIT(7) +/* EST control OSI_BIT map */ +#define MTL_EST_EEST OSI_BIT(0) +#define MTL_EST_SSWL OSI_BIT(1) +#define MTL_EST_QHLBF OSI_BIT(3) +#define MTL_EST_CTR_HIGH_MAX 0xFFU +#define MTL_EST_ITRE_CGCE OSI_BIT(4) +#define MTL_EST_ITRE_IEHS OSI_BIT(3) +#define MTL_EST_ITRE_IEHF OSI_BIT(2) +#define MTL_EST_ITRE_IEBE OSI_BIT(1) +#define MTL_EST_ITRE_IECC OSI_BIT(0) +/* MTL_FPE_CTRL_STS */ +#define MTL_FPE_CTS_PEC (OSI_BIT(8) | OSI_BIT(9) | \ + OSI_BIT(10) | OSI_BIT(11) | \ + OSI_BIT(12) | OSI_BIT(13) | \ + OSI_BIT(14) | OSI_BIT(15)) +#define MTL_FPE_CTS_PEC_SHIFT 8U +#define MTL_FPE_CTS_PEC_MAX_SHIFT 16U +#define MAC_FPE_CTS_EFPE OSI_BIT(0) +#define MAC_FPE_CTS_SVER OSI_BIT(1) +/* MTL FPE adv registers */ +#define MTL_FPE_ADV_HADV_MASK (0xFFFFU) +#define MTL_FPE_ADV_HADV_VAL 100U +#define DMA_MODE_SWR OSI_BIT(0) +#define MTL_QTOMR_FTQ OSI_BIT(0) +#define MTL_RXQ_OP_MODE_FEP OSI_BIT(4) +#define MAC_TCR_TSINIT OSI_BIT(2) +#define MAC_TCR_TSADDREG OSI_BIT(5) +#define MAC_PPS_CTL_PPSCTRL0 (OSI_BIT(3) | OSI_BIT(2) |\ + OSI_BIT(1) | OSI_BIT(0)) +#define MAC_SSIR_SSINC_SHIFT 16U +#define MAC_PFR_DAIF OSI_BIT(3) +#define 
MAC_PFR_DBF OSI_BIT(5) +#define MAC_PFR_PCF (OSI_BIT(6) | OSI_BIT(7)) +#define MAC_PFR_SAIF OSI_BIT(8) +#define MAC_PFR_SAF OSI_BIT(9) +#define MAC_PFR_HPF OSI_BIT(10) +#define MAC_PFR_VTFE OSI_BIT(16) +#define MAC_PFR_IPFE OSI_BIT(20) +#define MAC_PFR_IPFE_SHIFT 20U +#define MAC_PFR_DNTU OSI_BIT(21) +#define MAC_PFR_RA OSI_BIT(31) + +#define WRAP_SYNC_TSC_PTP_CAPTURE 0x800CU +#define WRAP_TSC_CAPTURE_LOW 0x8010U +#define WRAP_TSC_CAPTURE_HIGH 0x8014U +#define WRAP_PTP_CAPTURE_LOW 0x8018U +#define WRAP_PTP_CAPTURE_HIGH 0x801CU +#define MAC_PKT_FILTER_REG 0x0008 +#define HW_MAC_IER 0x00B4U +#define WRAP_COMMON_INTR_ENABLE 0x8704U + +/* common l3 l4 register bit fields for eqos and mgbe */ +#ifndef OSI_STRIPPED_LIB +#define MAC_L3L4_CTR_L3PEN_SHIFT 0 +#define MAC_L3L4_CTR_L3SAM_SHIFT 2 +#define MAC_L3L4_CTR_L3SAIM_SHIFT 3 +#endif /* !OSI_STRIPPED_LIB */ +#define MAC_L3L4_CTR_L3DAM_SHIFT 4 +#ifndef OSI_STRIPPED_LIB +#define MAC_L3L4_CTR_L3DAIM_SHIFT 5 +#define MAC_L3L4_CTR_L4PEN_SHIFT 16 +#define MAC_L3L4_CTR_L4SPM_SHIFT 18 +#define MAC_L3L4_CTR_L4SPIM_SHIFT 19 +#define MAC_L3L4_CTR_L4DPM_SHIFT 20 +#define MAC_L3L4_CTR_L4DPIM_SHIFT 21 +#endif /* !OSI_STRIPPED_LIB */ +#define MAC_L3L4_CTR_DMCHN_SHIFT 24 +#define EQOS_MAC_L3L4_CTR_DMCHEN_SHIFT 28 +#define MGBE_MAC_L3L4_CTR_DMCHEN_SHIFT 31 /** * @addtogroup typedef related info @@ -47,15 +125,57 @@ */ struct est_read { - /* variable pointer */ + /** variable pointer */ nveu32_t *var; - /* memory register/address offset */ + /** memory register/address offset */ nveu32_t addr; }; /** @} */ -nve32_t gcl_validate(struct osi_core_priv_data *const osi_core, - struct osi_est_config *const est, - const nveu32_t *btr, nveu32_t mac); +nve32_t hw_poll_for_swr(struct osi_core_priv_data *const osi_core); +void hw_start_mac(struct osi_core_priv_data *const osi_core); +void hw_stop_mac(struct osi_core_priv_data *const osi_core); +nve32_t hw_set_mode(struct osi_core_priv_data *const osi_core, const nve32_t mode); +nve32_t 
hw_set_speed(struct osi_core_priv_data *const osi_core, const nve32_t speed); +nve32_t hw_flush_mtl_tx_queue(struct osi_core_priv_data *const osi_core, + const nveu32_t q_inx); +nve32_t hw_config_fw_err_pkts(struct osi_core_priv_data *osi_core, + const nveu32_t q_inx, const nveu32_t enable_fw_err_pkts); +nve32_t hw_config_rxcsum_offload(struct osi_core_priv_data *const osi_core, + nveu32_t enabled); +nve32_t hw_set_systime_to_mac(struct osi_core_priv_data *const osi_core, + const nveu32_t sec, const nveu32_t nsec); +nve32_t hw_config_addend(struct osi_core_priv_data *const osi_core, + const nveu32_t addend); +void hw_config_tscr(struct osi_core_priv_data *const osi_core, const nveu32_t ptp_filter); +void hw_config_ssir(struct osi_core_priv_data *const osi_core); +nve32_t hw_ptp_tsc_capture(struct osi_core_priv_data *const osi_core, + struct osi_core_ptp_tsc_data *data); +nve32_t hw_config_mac_pkt_filter_reg(struct osi_core_priv_data *const osi_core, + const struct osi_filter *filter); +nve32_t hw_config_l3_l4_filter_enable(struct osi_core_priv_data *const osi_core, + const nveu32_t filter_enb_dis); +nve32_t hw_config_est(struct osi_core_priv_data *const osi_core, + struct osi_est_config *const est); +nve32_t hw_config_fpe(struct osi_core_priv_data *const osi_core, + struct osi_fpe_config *const fpe); +void hw_tsn_init(struct osi_core_priv_data *osi_core, + nveu32_t est_sel, nveu32_t fpe_sel); +void prepare_l3l4_registers(const struct osi_core_priv_data *const osi_core, + const struct osi_l3_l4_filter *const l3_l4, +#ifndef OSI_STRIPPED_LIB + nveu32_t *l3_addr0_reg, + nveu32_t *l3_addr2_reg, + nveu32_t *l3_addr3_reg, + nveu32_t *l4_addr_reg, +#endif /* !OSI_STRIPPED_LIB */ + nveu32_t *l3_addr1_reg, + nveu32_t *ctr_reg); +#ifdef HSI_SUPPORT +nve32_t hsi_common_error_inject(struct osi_core_priv_data *osi_core, + nveu32_t error_code); +#endif +nve32_t hw_validate_avb_input(struct osi_core_priv_data *const osi_core, + const struct osi_core_avb_algorithm *const avb); 
#endif /* INCLUDED_CORE_COMMON_H */ diff --git a/kernel/nvethernetrm/osi/core/core_local.h b/kernel/nvethernetrm/osi/core/core_local.h index d616c53dcf..36774759b8 100644 --- a/kernel/nvethernetrm/osi/core/core_local.h +++ b/kernel/nvethernetrm/osi/core/core_local.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -43,13 +43,51 @@ */ #define MAX_TX_TS_CNT (PKT_ID_CNT * OSI_MGBE_MAX_NUM_CHANS) +/** + * @brief FIFO size helper macro + */ +#define FIFO_SZ(x) ((((x) * 1024U) / 256U) - 1U) + +/** + * @brief Dynamic configuration helper macros. + */ +#define DYNAMIC_CFG_L3_L4 OSI_BIT(0) +#define DYNAMIC_CFG_AVB OSI_BIT(2) +#define DYNAMIC_CFG_L2 OSI_BIT(3) +#define DYNAMIC_CFG_L2_IDX 3U +#define DYNAMIC_CFG_RXCSUM OSI_BIT(4) +#define DYNAMIC_CFG_PTP OSI_BIT(7) +#define DYNAMIC_CFG_EST OSI_BIT(8) +#define DYNAMIC_CFG_FPE OSI_BIT(9) +#define DYNAMIC_CFG_FRP OSI_BIT(10) + +#ifndef OSI_STRIPPED_LIB +#define DYNAMIC_CFG_FC OSI_BIT(1) +#define DYNAMIC_CFG_VLAN OSI_BIT(5) +#define DYNAMIC_CFG_EEE OSI_BIT(6) +#define DYNAMIC_CFG_FC_IDX 1U +#define DYNAMIC_CFG_VLAN_IDX 5U +#define DYNAMIC_CFG_EEE_IDX 6U +#endif /* !OSI_STRIPPED_LIB */ + +#define DYNAMIC_CFG_L3_L4_IDX 0U +#define DYNAMIC_CFG_AVB_IDX 2U +#define DYNAMIC_CFG_L2_IDX 3U +#define DYNAMIC_CFG_RXCSUM_IDX 4U +#define DYNAMIC_CFG_PTP_IDX 7U +#define DYNAMIC_CFG_EST_IDX 8U +#define DYNAMIC_CFG_FPE_IDX 9U +#define DYNAMIC_CFG_FRP_IDX 10U + +#define OSI_SUSPENDED OSI_BIT(0) + + /** * interface core ops */ struct if_core_ops { /** Interface function called to initialize MAC and MTL registers */ - nve32_t (*if_core_init)(struct osi_core_priv_data *const osi_core, - nveu32_t tx_fifo_size, nveu32_t rx_fifo_size); + nve32_t (*if_core_init)(struct osi_core_priv_data 
*const osi_core); /** Interface function called to deinitialize MAC and MTL registers */ nve32_t (*if_core_deinit)(struct osi_core_priv_data *const osi_core); /** Interface function called to write into a PHY reg over MDIO bus */ @@ -72,103 +110,26 @@ struct if_core_ops { * @brief Initialize MAC & MTL core operations. */ struct core_ops { - /** Called to poll for software reset bit */ - nve32_t (*poll_for_swr)(struct osi_core_priv_data *const osi_core); /** Called to initialize MAC and MTL registers */ - nve32_t (*core_init)(struct osi_core_priv_data *const osi_core, - const nveu32_t tx_fifo_size, - const nveu32_t rx_fifo_size); - /** Called to deinitialize MAC and MTL registers */ - void (*core_deinit)(struct osi_core_priv_data *const osi_core); - /** Called to start MAC Tx and Rx engine */ - void (*start_mac)(struct osi_core_priv_data *const osi_core); - /** Called to stop MAC Tx and Rx engine */ - void (*stop_mac)(struct osi_core_priv_data *const osi_core); + nve32_t (*core_init)(struct osi_core_priv_data *const osi_core); /** Called to handle common interrupt */ void (*handle_common_intr)(struct osi_core_priv_data *const osi_core); - /** Called to set the mode at MAC (full/duplex) */ - nve32_t (*set_mode)(struct osi_core_priv_data *const osi_core, - const nve32_t mode); - /** Called to set the speed at MAC */ - nve32_t (*set_speed)(struct osi_core_priv_data *const osi_core, - const nve32_t speed); /** Called to do pad caliberation */ nve32_t (*pad_calibrate)(struct osi_core_priv_data *const osi_core); - /** Called to configure MTL RxQ to forward the err pkt */ - nve32_t (*config_fw_err_pkts)(struct osi_core_priv_data *const osi_core, - const nveu32_t qinx, - const nveu32_t fw_err); - /** Called to configure Rx Checksum offload engine */ - nve32_t (*config_rxcsum_offload)( - struct osi_core_priv_data *const osi_core, - const nveu32_t enabled); - /** Called to config mac packet filter */ - nve32_t (*config_mac_pkt_filter_reg)( - struct osi_core_priv_data *const 
osi_core, - const struct osi_filter *filter); /** Called to update MAC address 1-127 */ nve32_t (*update_mac_addr_low_high_reg)( struct osi_core_priv_data *const osi_core, const struct osi_filter *filter); - /** Called to configure l3/L4 filter */ - nve32_t (*config_l3_l4_filter_enable)( - struct osi_core_priv_data *const osi_core, - const nveu32_t enable); - /** Called to configure L3 filter */ - nve32_t (*config_l3_filters)(struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu32_t enb_dis, - const nveu32_t ipv4_ipv6_match, - const nveu32_t src_dst_addr_match, - const nveu32_t perfect_inverse_match, - const nveu32_t dma_routing_enable, - const nveu32_t dma_chan); - /** Called to update ip4 src or desc address */ - nve32_t (*update_ip4_addr)(struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu8_t addr[], - const nveu32_t src_dst_addr_match); - /** Called to update ip6 address */ - nve32_t (*update_ip6_addr)(struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu16_t addr[]); - /** Called to configure L4 filter */ - nve32_t (*config_l4_filters)(struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu32_t enb_dis, - const nveu32_t tcp_udp_match, - const nveu32_t src_dst_port_match, - const nveu32_t perfect_inverse_match, - const nveu32_t dma_routing_enable, - const nveu32_t dma_chan); - /** Called to update L4 Port for filter packet */ - nve32_t (*update_l4_port_no)(struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu16_t port_no, - const nveu32_t src_dst_port_match); - /** Called to set the addend value to adjust the time */ - nve32_t (*config_addend)(struct osi_core_priv_data *const osi_core, - const nveu32_t addend); + /** Called to configure L3L4 filter */ + nve32_t (*config_l3l4_filters)(struct osi_core_priv_data *const osi_core, + nveu32_t filter_no, + const struct osi_l3_l4_filter *const l3_l4); /** Called to adjust 
the mac time */ nve32_t (*adjust_mactime)(struct osi_core_priv_data *const osi_core, const nveu32_t sec, const nveu32_t nsec, const nveu32_t neg_adj, const nveu32_t one_nsec_accuracy); - /** Called to set current system time to MAC */ - nve32_t (*set_systime_to_mac)(struct osi_core_priv_data *const osi_core, - const nveu32_t sec, - const nveu32_t nsec); - /** Called to configure the TimeStampControl register */ - void (*config_tscr)(struct osi_core_priv_data *const osi_core, - const nveu32_t ptp_filter); - /** Called to configure the sub second increment register */ - void (*config_ssir)(struct osi_core_priv_data *const osi_core, - const nveu32_t ptp_clock); - /** Called to configure the PTP RX packets Queue */ - nve32_t (*config_ptp_rxq)(struct osi_core_priv_data *const osi_core, - const unsigned int rxq_idx, - const unsigned int enable); /** Called to update MMC counter from HW register */ void (*read_mmc)(struct osi_core_priv_data *const osi_core); /** Called to write into a PHY reg over MDIO bus */ @@ -180,6 +141,9 @@ struct core_ops { nve32_t (*read_phy_reg)(struct osi_core_priv_data *const osi_core, const nveu32_t phyaddr, const nveu32_t phyreg); + /** Called to get HW features */ + nve32_t (*get_hw_features)(struct osi_core_priv_data *const osi_core, + struct osi_hw_features *hw_feat); /** Called to read reg */ nveu32_t (*read_reg)(struct osi_core_priv_data *const osi_core, const nve32_t reg); @@ -195,20 +159,12 @@ struct core_ops { nveu32_t (*write_macsec_reg)(struct osi_core_priv_data *const osi_core, const nveu32_t val, const nve32_t reg); +#ifndef OSI_STRIPPED_LIB + void (*macsec_config_mac)(struct osi_core_priv_data *const osi_core, + const nveu32_t enable); +#endif /* !OSI_STRIPPED_LIB */ #endif /* MACSEC_SUPPORT */ #ifndef OSI_STRIPPED_LIB - /** Called periodically to read and validate safety critical - * registers against last written value */ - nve32_t (*validate_regs)(struct osi_core_priv_data *const osi_core); - /** Called to flush MTL Tx queue */ 
- nve32_t (*flush_mtl_tx_queue)(struct osi_core_priv_data *const osi_core, - const nveu32_t qinx); - /** Called to set av parameter */ - nve32_t (*set_avb_algorithm)(struct osi_core_priv_data *const osi_core, - const struct osi_core_avb_algorithm *const avb); - /** Called to get av parameter */ - nve32_t (*get_avb_algorithm)(struct osi_core_priv_data *const osi_core, - struct osi_core_avb_algorithm *const avb); /** Called to configure the MTL to forward/drop tx status */ nve32_t (*config_tx_status)(struct osi_core_priv_data *const osi_core, const nveu32_t tx_status); @@ -224,6 +180,9 @@ struct core_ops { nve32_t (*config_arp_offload)(struct osi_core_priv_data *const osi_core, const nveu32_t enable, const nveu8_t *ip_addr); + /** Called to configure HW PTP offload feature */ + nve32_t (*config_ptp_offload)(struct osi_core_priv_data *const osi_core, + struct osi_pto_config *const pto_config); /** Called to configure VLAN filtering */ nve32_t (*config_vlan_filtering)( struct osi_core_priv_data *const osi_core, @@ -236,10 +195,6 @@ struct core_ops { void (*configure_eee)(struct osi_core_priv_data *const osi_core, const nveu32_t tx_lpi_enabled, const nveu32_t tx_lpi_timer); - /** Called to save MAC register space during SoC suspend */ - nve32_t (*save_registers)(struct osi_core_priv_data *const osi_core); - /** Called to restore MAC control registers during SoC resume */ - nve32_t (*restore_registers)(struct osi_core_priv_data *const osi_core); /** Called to set MDC clock rate for MDIO operation */ void (*set_mdc_clk_rate)(struct osi_core_priv_data *const osi_core, const nveu64_t csr_clk_rate); @@ -247,63 +202,54 @@ struct core_ops { nve32_t (*config_mac_loopback)( struct osi_core_priv_data *const osi_core, const nveu32_t lb_mode); -#endif /* !OSI_STRIPPED_LIB */ - /** Called to get HW features */ - nve32_t (*get_hw_features)(struct osi_core_priv_data *const osi_core, - struct osi_hw_features *hw_feat); /** Called to configure RSS for MAC */ nve32_t (*config_rss)(struct 
osi_core_priv_data *osi_core); - /** Called to update GCL config */ - int (*hw_config_est)(struct osi_core_priv_data *const osi_core, - struct osi_est_config *const est); - /** Called to update FPE config */ - int (*hw_config_fpe)(struct osi_core_priv_data *const osi_core, - struct osi_fpe_config *const fpe); + /** Called to configure the PTP RX packets Queue */ + nve32_t (*config_ptp_rxq)(struct osi_core_priv_data *const osi_core, + const nveu32_t rxq_idx, + const nveu32_t enable); +#endif /* !OSI_STRIPPED_LIB */ + /** Called to set av parameter */ + nve32_t (*set_avb_algorithm)(struct osi_core_priv_data *const osi_core, + const struct osi_core_avb_algorithm *const avb); + /** Called to get av parameter */ + nve32_t (*get_avb_algorithm)(struct osi_core_priv_data *const osi_core, + struct osi_core_avb_algorithm *const avb); /** Called to configure FRP engine */ - int (*config_frp)(struct osi_core_priv_data *const osi_core, - const unsigned int enabled); + nve32_t (*config_frp)(struct osi_core_priv_data *const osi_core, + const nveu32_t enabled); /** Called to update FRP Instruction Table entry */ - int (*update_frp_entry)(struct osi_core_priv_data *const osi_core, - const unsigned int pos, - struct osi_core_frp_data *const data); + nve32_t (*update_frp_entry)(struct osi_core_priv_data *const osi_core, + const nveu32_t pos, + struct osi_core_frp_data *const data); /** Called to update FRP NVE and */ - int (*update_frp_nve)(struct osi_core_priv_data *const osi_core, - const unsigned int nve); - /** Called to configure HW PTP offload feature */ - int (*config_ptp_offload)(struct osi_core_priv_data *const osi_core, - struct osi_pto_config *const pto_config); -#ifdef MACSEC_SUPPORT - void (*macsec_config_mac)(struct osi_core_priv_data *const osi_core, - const nveu32_t enable); -#endif /* MACSEC_SUPPORT */ - int (*ptp_tsc_capture)(struct osi_core_priv_data *const osi_core, - struct osi_core_ptp_tsc_data *data); + nve32_t (*update_frp_nve)(struct osi_core_priv_data *const 
osi_core, + const nveu32_t nve); #ifdef HSI_SUPPORT /** Interface function called to initialize HSI */ - int (*core_hsi_configure)(struct osi_core_priv_data *const osi_core, + nve32_t (*core_hsi_configure)(struct osi_core_priv_data *const osi_core, const nveu32_t enable); + /** Interface function called to inject error */ + nve32_t (*core_hsi_inject_err)(struct osi_core_priv_data *const osi_core, + const nveu32_t error_code); #endif }; /** * @brief constant values for drift MAC to MAC sync. */ -#ifndef DRIFT_CAL -#define DRIFT_CAL 1 -#define I_COMPONENT_BY_10 3 -#define P_COMPONENT_BY_10 7 -#define WEIGHT_BY_10 10 -#define CONST_FACTOR 8 //(1sec/125ns) -#define MAX_FREQ 85000000LL -#endif -#define EQOS_SEC_OFFSET 0xB08 -#define EQOS_NSEC_OFFSET 0xB0C -#define MGBE_SEC_OFFSET 0xD08 -#define MGBE_NSEC_OFFSET 0xD0C -#define ETHER_NSEC_MASK 0x7FFFFFFF -#define SERVO_STATS_0 0 -#define SERVO_STATS_1 1 -#define SERVO_STATS_2 2 +/* No longer needed since DRIFT CAL is not used */ +#define I_COMPONENT_BY_10 3LL +#define P_COMPONENT_BY_10 7LL +#define WEIGHT_BY_10 10LL +#define MAX_FREQ_POS 250000000LL +#define MAX_FREQ_NEG -250000000LL +#define SERVO_STATS_0 0U +#define SERVO_STATS_1 1U +#define SERVO_STATS_2 2U +#define OSI_NSEC_PER_SEC_SIGNED 1000000000LL + +#define ETHER_NSEC_MASK 0x7FFFFFFFU /** * @brief servo data structure. 
@@ -330,6 +276,64 @@ struct core_ptp_servo { nveu32_t m2m_lock; }; +/** + * @brief AVB dynamic config storage structure + */ +struct core_avb { + /** Represend whether AVB config done or not */ + nveu32_t used; + /** AVB data structure */ + struct osi_core_avb_algorithm avb_info; +}; + +/** + * @brief VLAN dynamic config storage structure + */ +struct core_vlan { + /** VID to be stored */ + nveu32_t vid; + /** Represens whether VLAN config done or not */ + nveu32_t used; +}; + +/** + * @brief L2 filter dynamic config storage structure + */ +struct core_l2 { + nveu32_t used; + struct osi_filter filter; +}; + +/** + * @brief Dynamic config storage structure + */ +struct dynamic_cfg { + nveu32_t flags; + /** L3_L4 filters */ + struct osi_l3_l4_filter l3_l4[OSI_MGBE_MAX_L3_L4_FILTER]; + /** flow control */ + nveu32_t flow_ctrl; + /** AVB */ + struct core_avb avb[OSI_MGBE_MAX_NUM_QUEUES]; + /** RXCSUM */ + nveu32_t rxcsum; + /** VLAN arguments storage */ + struct core_vlan vlan[VLAN_NUM_VID]; + /** LPI parameters storage */ + nveu32_t tx_lpi_enabled; + nveu32_t tx_lpi_timer; + /** PTP information storage */ + nveu32_t ptp; + /** EST information storage */ + struct osi_est_config est; + /** FPE information storage */ + struct osi_fpe_config fpe; + /** L2 filter storage */ + struct osi_filter l2_filter; + /** L2 filter configuration */ + struct core_l2 l2[EQOS_MAX_MAC_ADDRESS_FILTER]; +}; + /** * @brief Core local data structure. 
*/ @@ -351,7 +355,7 @@ struct core_local { /** This is the head node for PTP packet ID queue */ struct osi_core_tx_ts tx_ts_head; /** Maximum number of queues/channels */ - nveu32_t max_chans; + nveu32_t num_max_chans; /** GCL depth supported by HW */ nveu32_t gcl_dep; /** Max GCL width (time + gate) value supported by HW */ @@ -370,12 +374,25 @@ struct core_local { nveu32_t pps_freq; /** Time interval mask for GCL entry */ nveu32_t ti_mask; + /** Hardware dynamic configuration context */ + struct dynamic_cfg cfg; + /** Hardware dynamic configuration state */ + nveu32_t state; + /** XPCS Lane bringup/Block lock status */ + nveu32_t lane_status; + /** Exact MAC used across SOCs 0:Legacy EQOS, 1:Orin EQOS, 2:Orin MGBE */ + nveu32_t l_mac_ver; +#if defined(L3L4_WILDCARD_FILTER) + /** l3l4 wildcard filter configured (OSI_ENABLE) / not configured (OSI_DISABLE) */ + nveu32_t l3l4_wildcard_filter_configured; +#endif /* L3L4_WILDCARD_FILTER */ }; /** - * @brief eqos_init_core_ops - Initialize EQOS core operations. + * @brief update_counter_u - Increment nveu32_t counter * - * @param[in] ops: Core operations pointer. + * @param[out] value: Pointer to value to be incremented. + * @param[in] incr: increment value * * @note * API Group: @@ -383,10 +400,19 @@ struct core_local { * - Run time: No * - De-initialization: No */ -void eqos_init_core_ops(struct core_ops *ops); +static inline void update_counter_u(nveu32_t *value, nveu32_t incr) +{ + nveu32_t temp = *value + incr; + + if (temp < *value) { + /* Overflow, so reset it to zero */ + *value = 0U; + } + *value = temp; +} /** - * @brief ivc_init_core_ops - Initialize IVC core operations. + * @brief eqos_init_core_ops - Initialize EQOS core operations. * * @param[in] ops: Core operations pointer. 
* @@ -396,7 +422,7 @@ void eqos_init_core_ops(struct core_ops *ops); * - Run time: No * - De-initialization: No */ -void ivc_init_core_ops(struct core_ops *ops); +void eqos_init_core_ops(struct core_ops *ops); /** * @brief mgbe_init_core_ops - Initialize MGBE core operations. diff --git a/kernel/nvethernetrm/osi/core/debug.c b/kernel/nvethernetrm/osi/core/debug.c index 8b16296ba9..72fc3b50c3 100644 --- a/kernel/nvethernetrm/osi/core/debug.c +++ b/kernel/nvethernetrm/osi/core/debug.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -32,10 +32,10 @@ * */ static void core_dump_struct(struct osi_core_priv_data *osi_core, - unsigned char *ptr, + nveu8_t *ptr, unsigned long size) { - nveu32_t i = 0, rem, j; + nveu32_t i = 0, rem, j = 0; unsigned long temp; if (ptr == OSI_NULL) { @@ -72,40 +72,40 @@ void core_structs_dump(struct osi_core_priv_data *osi_core) osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "CORE struct size = %lu", sizeof(struct osi_core_priv_data)); - core_dump_struct(osi_core, (unsigned char *)osi_core, + core_dump_struct(osi_core, (nveu8_t *)osi_core, sizeof(struct osi_core_priv_data)); #ifdef MACSEC_SUPPORT osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "MACSEC ops size = %lu", sizeof(struct osi_macsec_core_ops)); - core_dump_struct(osi_core, (unsigned char *)osi_core->macsec_ops, + core_dump_struct(osi_core, (nveu8_t *)osi_core->macsec_ops, sizeof(struct osi_macsec_core_ops)); osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "MACSEC LUT status size = %lu", sizeof(struct osi_macsec_lut_status)); - core_dump_struct(osi_core, (unsigned char *)osi_core->macsec_ops, + core_dump_struct(osi_core, (nveu8_t *)osi_core->macsec_ops, sizeof(struct osi_macsec_lut_status)); 
#endif osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "HW features size = %lu", sizeof(struct osi_hw_features)); - core_dump_struct(osi_core, (unsigned char *)osi_core->hw_feature, + core_dump_struct(osi_core, (nveu8_t *)osi_core->hw_feature, sizeof(struct osi_hw_features)); osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "core local size = %lu", sizeof(struct core_local)); - core_dump_struct(osi_core, (unsigned char *)l_core, + core_dump_struct(osi_core, (nveu8_t *)l_core, sizeof(struct core_local)); osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "core ops size = %lu", sizeof(struct core_ops)); - core_dump_struct(osi_core, (unsigned char *)l_core->ops_p, + core_dump_struct(osi_core, (nveu8_t *)l_core->ops_p, sizeof(struct core_ops)); osi_core->osd_ops.printf(osi_core, OSI_DEBUG_TYPE_STRUCTS, "if_ops_p struct size = %lu", sizeof(struct if_core_ops)); - core_dump_struct(osi_core, (unsigned char *)l_core->if_ops_p, + core_dump_struct(osi_core, (nveu8_t *)l_core->if_ops_p, sizeof(struct if_core_ops)); } @@ -116,9 +116,9 @@ void core_structs_dump(struct osi_core_priv_data *osi_core) */ void core_reg_dump(struct osi_core_priv_data *osi_core) { - unsigned int max_addr; - unsigned int addr = 0x0; - unsigned int reg_val; + nveu32_t max_addr; + nveu32_t addr = 0x0; + nveu32_t reg_val; switch (osi_core->mac_ver) { case OSI_EQOS_MAC_5_00: diff --git a/kernel/nvethernetrm/osi/core/debug.h b/kernel/nvethernetrm/osi/core/debug.h index 502951075e..60d06e1a65 100644 --- a/kernel/nvethernetrm/osi/core/debug.h +++ b/kernel/nvethernetrm/osi/core/debug.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,6 +20,7 @@ * DEALINGS IN THE SOFTWARE. 
*/ +#ifdef OSI_DEBUG #ifndef INCLUDED_CORE_DEBUG_H #define INCLUDED_CORE_DEBUG_H @@ -32,3 +33,4 @@ void core_reg_dump(struct osi_core_priv_data *osi_core); void core_structs_dump(struct osi_core_priv_data *osi_core); #endif /* INCLUDED_CORE_DEBUG_H*/ +#endif /* OSI_DEBUG */ diff --git a/kernel/nvethernetrm/osi/core/eqos_core.c b/kernel/nvethernetrm/osi/core/eqos_core.c index 4ab9a96f83..2d987fe8b2 100644 --- a/kernel/nvethernetrm/osi/core/eqos_core.c +++ b/kernel/nvethernetrm/osi/core/eqos_core.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -26,8 +26,8 @@ #include "eqos_core.h" #include "eqos_mmc.h" #include "core_local.h" -#include "vlan_filter.h" #include "core_common.h" +#include "macsec.h" #ifdef UPDATED_PAD_CAL /* @@ -39,364 +39,7 @@ static nve32_t eqos_pre_pad_calibrate( struct osi_core_priv_data *const osi_core); #endif /* UPDATED_PAD_CAL */ -/** - * @brief eqos_core_safety_config - EQOS MAC core safety configuration - */ -static struct core_func_safety eqos_core_safety_config; - -/** - * @brief eqos_ptp_tsc_capture - read PTP and TSC registers - * - * Algorithm: - * - write 1 to ETHER_QOS_WRAP_SYNC_TSC_PTP_CAPTURE_0 - * - wait till ETHER_QOS_WRAP_SYNC_TSC_PTP_CAPTURE_0 is 0x0 - * - read and return following registers - * ETHER_QOS_WRAP_TSC_CAPTURE_LOW_0 - * ETHER_QOS_WRAP_TSC_CAPTURE_HIGH_0 - * ETHER_QOS_WRAP_PTP_CAPTURE_LOW_0 - * ETHER_QOS_WRAP_PTP_CAPTURE_HIGH_0 - * - * @param[in] base: EQOS virtual base address. - * @param[out]: osi_core_ptp_tsc_data register - * - * @note MAC needs to be out of reset and proper clock configured. TSC and PTP - * registers should be configured. - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_ptp_tsc_capture(struct osi_core_priv_data *const osi_core, - struct osi_core_ptp_tsc_data *data) -{ - nveu32_t retry = 20U; - nveu32_t count = 0U, val = 0U; - nve32_t cond = COND_NOT_MET; - nve32_t ret = -1; - - if (osi_core->mac_ver < OSI_EQOS_MAC_5_30) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "ptp_tsc: older IP\n", 0ULL); - goto done; - } - osi_writela(osi_core, OSI_ENABLE, (nveu8_t *)osi_core->base + - EQOS_WRAP_SYNC_TSC_PTP_CAPTURE); - - /* Poll Until Poll Condition */ - while (cond == COND_NOT_MET) { - if (count > retry) { - /* Max retries reached */ - goto done; - } - - count++; - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_WRAP_SYNC_TSC_PTP_CAPTURE); - if ((val & OSI_ENABLE) == OSI_NONE) { - cond = COND_MET; - } else { - /* delay if SWR is set */ - osi_core->osd_ops.udelay(1U); - } - } - - data->tsc_low_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_WRAP_TSC_CAPTURE_LOW); - data->tsc_high_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_WRAP_TSC_CAPTURE_HIGH); - data->ptp_low_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_WRAP_PTP_CAPTURE_LOW); - data->ptp_high_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_WRAP_PTP_CAPTURE_HIGH); - ret = 0; -done: - return ret; -} - -/** - * @brief eqos_core_safety_writel - Write to safety critical register. - * - * @note - * Algorithm: - * - Acquire RW lock, so that eqos_validate_core_regs does not run while - * updating the safety critical register. - * - call osi_writela() to actually update the memory mapped register. - * - Store the same value in eqos_core_safety_config->reg_val[idx], - * so that this latest value will be compared when eqos_validate_core_regs - * is scheduled. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] val: Value to be written. - * @param[in] addr: memory mapped register address to be written to. 
- * @param[in] idx: Index of register corresponding to enum func_safety_core_regs. - * - * @pre MAC has to be out of reset, and clocks supplied. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: Yes - */ -static inline void eqos_core_safety_writel( - struct osi_core_priv_data *const osi_core, - nveu32_t val, void *addr, - nveu32_t idx) -{ - struct core_func_safety *config = &eqos_core_safety_config; - - osi_lock_irq_enabled(&config->core_safety_lock); - osi_writela(osi_core, val, addr); - config->reg_val[idx] = (val & config->reg_mask[idx]); - osi_unlock_irq_enabled(&config->core_safety_lock); -} - -/** - * @brief Initialize the eqos_core_safety_config. - * - * @note - * Algorithm: - * - Populate the list of safety critical registers and provide - * the address of the register - * - Register mask (to ignore reserved/self-critical bits in the reg). - * See eqos_validate_core_regs which can be invoked periodically to compare - * the last written value to this register vs the actual value read when - * eqos_validate_core_regs is scheduled. - * - * @param[in] osi_core: OSI core private data structure. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_core_safety_init(struct osi_core_priv_data *const osi_core) -{ - struct core_func_safety *config = &eqos_core_safety_config; - nveu8_t *base = (nveu8_t *)osi_core->base; - nveu32_t val; - nveu32_t i, idx; - - /* Initialize all reg address to NULL, since we may not use - * some regs depending on the number of MTL queues enabled. 
- */ - for (i = EQOS_MAC_MCR_IDX; i < EQOS_MAX_CORE_SAFETY_REGS; i++) { - config->reg_addr[i] = OSI_NULL; - } - - /* Store reg addresses to run periodic read MAC registers.*/ - config->reg_addr[EQOS_MAC_MCR_IDX] = base + EQOS_MAC_MCR; - config->reg_addr[EQOS_MAC_PFR_IDX] = base + EQOS_MAC_PFR; - for (i = 0U; i < OSI_EQOS_MAX_HASH_REGS; i++) { - config->reg_addr[EQOS_MAC_HTR0_IDX + i] = - base + EQOS_MAC_HTR_REG(i); - } - config->reg_addr[EQOS_MAC_Q0_TXFC_IDX] = base + - EQOS_MAC_QX_TX_FLW_CTRL(0U); - config->reg_addr[EQOS_MAC_RQC0R_IDX] = base + EQOS_MAC_RQC0R; - config->reg_addr[EQOS_MAC_RQC1R_IDX] = base + EQOS_MAC_RQC1R; - config->reg_addr[EQOS_MAC_RQC2R_IDX] = base + EQOS_MAC_RQC2R; - config->reg_addr[EQOS_MAC_IMR_IDX] = base + EQOS_MAC_IMR; - config->reg_addr[EQOS_MAC_MA0HR_IDX] = base + EQOS_MAC_MA0HR; - config->reg_addr[EQOS_MAC_MA0LR_IDX] = base + EQOS_MAC_MA0LR; - config->reg_addr[EQOS_MAC_TCR_IDX] = base + EQOS_MAC_TCR; - config->reg_addr[EQOS_MAC_SSIR_IDX] = base + EQOS_MAC_SSIR; - config->reg_addr[EQOS_MAC_TAR_IDX] = base + EQOS_MAC_TAR; - config->reg_addr[EQOS_PAD_AUTO_CAL_CFG_IDX] = base + - EQOS_PAD_AUTO_CAL_CFG; - /* MTL registers */ - config->reg_addr[EQOS_MTL_RXQ_DMA_MAP0_IDX] = base + - EQOS_MTL_RXQ_DMA_MAP0; - for (i = 0U; i < osi_core->num_mtl_queues; i++) { - idx = osi_core->mtl_queues[i]; - if (idx >= OSI_EQOS_MAX_NUM_CHANS) { - continue; - } - - config->reg_addr[EQOS_MTL_CH0_TX_OP_MODE_IDX + idx] = base + - EQOS_MTL_CHX_TX_OP_MODE(idx); - config->reg_addr[EQOS_MTL_TXQ0_QW_IDX + idx] = base + - EQOS_MTL_TXQ_QW(idx); - config->reg_addr[EQOS_MTL_CH0_RX_OP_MODE_IDX + idx] = base + - EQOS_MTL_CHX_RX_OP_MODE(idx); - } - /* DMA registers */ - config->reg_addr[EQOS_DMA_SBUS_IDX] = base + EQOS_DMA_SBUS; - - /* Update the register mask to ignore reserved bits/self-clearing bits. 
- * MAC registers */ - config->reg_mask[EQOS_MAC_MCR_IDX] = EQOS_MAC_MCR_MASK; - config->reg_mask[EQOS_MAC_PFR_IDX] = EQOS_MAC_PFR_MASK; - for (i = 0U; i < OSI_EQOS_MAX_HASH_REGS; i++) { - config->reg_mask[EQOS_MAC_HTR0_IDX + i] = EQOS_MAC_HTR_MASK; - } - config->reg_mask[EQOS_MAC_Q0_TXFC_IDX] = EQOS_MAC_QX_TXFC_MASK; - config->reg_mask[EQOS_MAC_RQC0R_IDX] = EQOS_MAC_RQC0R_MASK; - config->reg_mask[EQOS_MAC_RQC1R_IDX] = EQOS_MAC_RQC1R_MASK; - config->reg_mask[EQOS_MAC_RQC2R_IDX] = EQOS_MAC_RQC2R_MASK; - config->reg_mask[EQOS_MAC_IMR_IDX] = EQOS_MAC_IMR_MASK; - config->reg_mask[EQOS_MAC_MA0HR_IDX] = EQOS_MAC_MA0HR_MASK; - config->reg_mask[EQOS_MAC_MA0LR_IDX] = EQOS_MAC_MA0LR_MASK; - config->reg_mask[EQOS_MAC_TCR_IDX] = EQOS_MAC_TCR_MASK; - config->reg_mask[EQOS_MAC_SSIR_IDX] = EQOS_MAC_SSIR_MASK; - config->reg_mask[EQOS_MAC_TAR_IDX] = EQOS_MAC_TAR_MASK; - config->reg_mask[EQOS_PAD_AUTO_CAL_CFG_IDX] = - EQOS_PAD_AUTO_CAL_CFG_MASK; - /* MTL registers */ - config->reg_mask[EQOS_MTL_RXQ_DMA_MAP0_IDX] = EQOS_RXQ_DMA_MAP0_MASK; - for (i = 0U; i < osi_core->num_mtl_queues; i++) { - idx = osi_core->mtl_queues[i]; - if (idx >= OSI_EQOS_MAX_NUM_CHANS) { - continue; - } - - config->reg_mask[EQOS_MTL_CH0_TX_OP_MODE_IDX + idx] = - EQOS_MTL_TXQ_OP_MODE_MASK; - config->reg_mask[EQOS_MTL_TXQ0_QW_IDX + idx] = - EQOS_MTL_TXQ_QW_MASK; - config->reg_mask[EQOS_MTL_CH0_RX_OP_MODE_IDX + idx] = - EQOS_MTL_RXQ_OP_MODE_MASK; - } - /* DMA registers */ - config->reg_mask[EQOS_DMA_SBUS_IDX] = EQOS_DMA_SBUS_MASK; - - /* Initialize current power-on-reset values of these registers */ - for (i = EQOS_MAC_MCR_IDX; i < EQOS_MAX_CORE_SAFETY_REGS; i++) { - if (config->reg_addr[i] == OSI_NULL) { - continue; - } - - val = osi_readla(osi_core, - (nveu8_t *)config->reg_addr[i]); - config->reg_val[i] = val & config->reg_mask[i]; - } - - osi_lock_init(&config->core_safety_lock); -} - -/** - * @brief Initialize the OSI core private data backup config array - * - * @note - * Algorithm: - * - Populate the list 
of core registers to be saved during suspend. - * Fill the address of each register in structure. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @param[in] osi_core: OSI core private data structure. - */ -static void eqos_core_backup_init(struct osi_core_priv_data *const osi_core) -{ - struct core_backup *config = &osi_core->backup_config; - nveu8_t *base = (nveu8_t *)osi_core->base; - nveu32_t i; - - /* MAC registers backup */ - config->reg_addr[EQOS_MAC_MCR_BAK_IDX] = base + EQOS_MAC_MCR; - config->reg_addr[EQOS_MAC_EXTR_BAK_IDX] = base + EQOS_MAC_EXTR; - config->reg_addr[EQOS_MAC_PFR_BAK_IDX] = base + EQOS_MAC_PFR; - config->reg_addr[EQOS_MAC_VLAN_TAG_BAK_IDX] = base + - EQOS_MAC_VLAN_TAG; - config->reg_addr[EQOS_MAC_VLANTIR_BAK_IDX] = base + EQOS_MAC_VLANTIR; - config->reg_addr[EQOS_MAC_RX_FLW_CTRL_BAK_IDX] = base + - EQOS_MAC_RX_FLW_CTRL; - config->reg_addr[EQOS_MAC_RQC0R_BAK_IDX] = base + EQOS_MAC_RQC0R; - config->reg_addr[EQOS_MAC_RQC1R_BAK_IDX] = base + EQOS_MAC_RQC1R; - config->reg_addr[EQOS_MAC_RQC2R_BAK_IDX] = base + EQOS_MAC_RQC2R; - config->reg_addr[EQOS_MAC_ISR_BAK_IDX] = base + EQOS_MAC_ISR; - config->reg_addr[EQOS_MAC_IMR_BAK_IDX] = base + EQOS_MAC_IMR; - config->reg_addr[EQOS_MAC_PMTCSR_BAK_IDX] = base + EQOS_MAC_PMTCSR; - config->reg_addr[EQOS_MAC_LPI_CSR_BAK_IDX] = base + EQOS_MAC_LPI_CSR; - config->reg_addr[EQOS_MAC_LPI_TIMER_CTRL_BAK_IDX] = base + - EQOS_MAC_LPI_TIMER_CTRL; - config->reg_addr[EQOS_MAC_LPI_EN_TIMER_BAK_IDX] = base + - EQOS_MAC_LPI_EN_TIMER; - config->reg_addr[EQOS_MAC_ANS_BAK_IDX] = base + EQOS_MAC_ANS; - config->reg_addr[EQOS_MAC_PCS_BAK_IDX] = base + EQOS_MAC_PCS; - if (osi_core->mac_ver == OSI_EQOS_MAC_5_00) { - config->reg_addr[EQOS_5_00_MAC_ARPPA_BAK_IDX] = base + - EQOS_5_00_MAC_ARPPA; - } - config->reg_addr[EQOS_MMC_CNTRL_BAK_IDX] = base + EQOS_MMC_CNTRL; - if (osi_core->mac_ver == OSI_EQOS_MAC_4_10) { - config->reg_addr[EQOS_4_10_MAC_ARPPA_BAK_IDX] = base + - 
EQOS_4_10_MAC_ARPPA; - } - config->reg_addr[EQOS_MAC_TCR_BAK_IDX] = base + EQOS_MAC_TCR; - config->reg_addr[EQOS_MAC_SSIR_BAK_IDX] = base + EQOS_MAC_SSIR; - config->reg_addr[EQOS_MAC_STSR_BAK_IDX] = base + EQOS_MAC_STSR; - config->reg_addr[EQOS_MAC_STNSR_BAK_IDX] = base + EQOS_MAC_STNSR; - config->reg_addr[EQOS_MAC_STSUR_BAK_IDX] = base + EQOS_MAC_STSUR; - config->reg_addr[EQOS_MAC_STNSUR_BAK_IDX] = base + EQOS_MAC_STNSUR; - config->reg_addr[EQOS_MAC_TAR_BAK_IDX] = base + EQOS_MAC_TAR; - config->reg_addr[EQOS_DMA_BMR_BAK_IDX] = base + EQOS_DMA_BMR; - config->reg_addr[EQOS_DMA_SBUS_BAK_IDX] = base + EQOS_DMA_SBUS; - config->reg_addr[EQOS_DMA_ISR_BAK_IDX] = base + EQOS_DMA_ISR; - config->reg_addr[EQOS_MTL_OP_MODE_BAK_IDX] = base + EQOS_MTL_OP_MODE; - config->reg_addr[EQOS_MTL_RXQ_DMA_MAP0_BAK_IDX] = base + - EQOS_MTL_RXQ_DMA_MAP0; - - for (i = 0; i < EQOS_MAX_HTR_REGS; i++) { - config->reg_addr[EQOS_MAC_HTR_REG_BAK_IDX(i)] = base + - EQOS_MAC_HTR_REG(i); - } - for (i = 0; i < OSI_EQOS_MAX_NUM_QUEUES; i++) { - config->reg_addr[EQOS_MAC_QX_TX_FLW_CTRL_BAK_IDX(i)] = base + - EQOS_MAC_QX_TX_FLW_CTRL(i); - } - for (i = 0; i < EQOS_MAX_MAC_ADDRESS_FILTER; i++) { - config->reg_addr[EQOS_MAC_ADDRH_BAK_IDX(i)] = base + - EQOS_MAC_ADDRH(i); - config->reg_addr[EQOS_MAC_ADDRL_BAK_IDX(i)] = base + - EQOS_MAC_ADDRL(i); - } - for (i = 0; i < EQOS_MAX_L3_L4_FILTER; i++) { - config->reg_addr[EQOS_MAC_L3L4_CTR_BAK_IDX(i)] = base + - EQOS_MAC_L3L4_CTR(i); - config->reg_addr[EQOS_MAC_L4_ADR_BAK_IDX(i)] = base + - EQOS_MAC_L4_ADR(i); - config->reg_addr[EQOS_MAC_L3_AD0R_BAK_IDX(i)] = base + - EQOS_MAC_L3_AD0R(i); - config->reg_addr[EQOS_MAC_L3_AD1R_BAK_IDX(i)] = base + - EQOS_MAC_L3_AD1R(i); - config->reg_addr[EQOS_MAC_L3_AD2R_BAK_IDX(i)] = base + - EQOS_MAC_L3_AD2R(i); - config->reg_addr[EQOS_MAC_L3_AD3R_BAK_IDX(i)] = base + - EQOS_MAC_L3_AD3R(i); - } - for (i = 0; i < OSI_EQOS_MAX_NUM_QUEUES; i++) { - config->reg_addr[EQOS_MTL_CHX_TX_OP_MODE_BAK_IDX(i)] = base + - 
EQOS_MTL_CHX_TX_OP_MODE(i); - config->reg_addr[EQOS_MTL_TXQ_ETS_CR_BAK_IDX(i)] = base + - EQOS_MTL_TXQ_ETS_CR(i); - config->reg_addr[EQOS_MTL_TXQ_QW_BAK_IDX(i)] = base + - EQOS_MTL_TXQ_QW(i); - config->reg_addr[EQOS_MTL_TXQ_ETS_SSCR_BAK_IDX(i)] = base + - EQOS_MTL_TXQ_ETS_SSCR(i); - config->reg_addr[EQOS_MTL_TXQ_ETS_HCR_BAK_IDX(i)] = base + - EQOS_MTL_TXQ_ETS_HCR(i); - config->reg_addr[EQOS_MTL_TXQ_ETS_LCR_BAK_IDX(i)] = base + - EQOS_MTL_TXQ_ETS_LCR(i); - config->reg_addr[EQOS_MTL_CHX_RX_OP_MODE_BAK_IDX(i)] = base + - EQOS_MTL_CHX_RX_OP_MODE(i); - } - - /* Wrapper register backup */ - config->reg_addr[EQOS_CLOCK_CTRL_0_BAK_IDX] = base + - EQOS_CLOCK_CTRL_0; - config->reg_addr[EQOS_AXI_ASID_CTRL_BAK_IDX] = base + - EQOS_AXI_ASID_CTRL; - config->reg_addr[EQOS_PAD_CRTL_BAK_IDX] = base + EQOS_PAD_CRTL; - config->reg_addr[EQOS_PAD_AUTO_CAL_CFG_BAK_IDX] = base + - EQOS_PAD_AUTO_CAL_CFG; -} - +#ifndef OSI_STRIPPED_LIB /** * @brief eqos_config_flow_control - Configure MAC flow control settings * @@ -430,7 +73,7 @@ static nve32_t eqos_config_flow_control( /* return on invalid argument */ if (flw_ctrl > (OSI_FLOW_CTRL_RX | OSI_FLOW_CTRL_TX)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "flw_ctr: invalid input\n", 0ULL); return -1; } @@ -455,9 +98,7 @@ static nve32_t eqos_config_flow_control( } /* Write to MAC Tx Flow control Register of Q0 */ - eqos_core_safety_writel(osi_core, val, (nveu8_t *)addr + - EQOS_MAC_QX_TX_FLW_CTRL(0U), - EQOS_MAC_Q0_TXFC_IDX); + osi_writela(osi_core, val, (nveu8_t *)addr + EQOS_MAC_QX_TX_FLW_CTRL(0U)); /* Configure MAC Rx Flow control*/ /* Read MAC Rx Flow control Register */ @@ -481,22 +122,32 @@ static nve32_t eqos_config_flow_control( return 0; } +#endif /* !OSI_STRIPPED_LIB */ +#ifdef UPDATED_PAD_CAL /** - * @brief eqos_config_fw_err_pkts - Configure forwarding of error packets + * @brief eqos_pad_calibrate - performs PAD calibration * * @note * Algorithm: - * - Validate fw_err and return 
-1 if fails. - * - Enable or disable forward error packet confiration based on fw_err. - * - Refer to EQOS column of <> for API details. - * - TraceID: ETHERNET_NVETHERNETRM_020 + * - Set field PAD_E_INPUT_OR_E_PWRD in reg ETHER_QOS_SDMEMCOMPPADCTRL_0 + * - Delay for 1 usec. + * - Set AUTO_CAL_ENABLE and AUTO_CAL_START in reg + * ETHER_QOS_AUTO_CAL_CONFIG_0 + * - Wait on AUTO_CAL_ACTIVE until it is 0 for a loop of 1000 with a sleep of 10 microsecond + * between iterations. + * - Re-program the value PAD_E_INPUT_OR_E_PWRD in + * ETHER_QOS_SDMEMCOMPPADCTRL_0 to save power + * - return 0 if wait for AUTO_CAL_ACTIVE is success else -1. + * - Refer to EQOS column of <> for API details. + * - TraceID:ETHERNET_NVETHERNETRM_013 + * - * @param[in] osi_core: OSI core private data structure. Used param base. - * @param[in] qinx: Queue index. Max value OSI_EQOS_MAX_NUM_CHANS-1. - * @param[in] fw_err: Enable(OSI_ENABLE) or Disable(OSI_DISABLE) the forwarding of error packets + * @param[in] osi_core: OSI core private data structure. Used param is base, osd_ops.usleep_range. * - * @pre MAC should be initialized and started. see osi_start_mac() + * @pre + * - MAC should be out of reset and clocks enabled. + * - RGMII and MDIO interface needs to be IDLE before performing PAD + * calibration. * * @note * API Group: @@ -507,127 +158,100 @@ static nve32_t eqos_config_flow_control( * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t eqos_config_fw_err_pkts( - struct osi_core_priv_data *const osi_core, - const nveu32_t qinx, - const nveu32_t fw_err) +static nve32_t eqos_pad_calibrate(struct osi_core_priv_data *const osi_core) { - void *addr = osi_core->base; - nveu32_t val; - - /* Check for valid fw_err and qinx values */ - if (((fw_err != OSI_ENABLE) && (fw_err != OSI_DISABLE)) || - (qinx >= OSI_EQOS_MAX_NUM_CHANS)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "config_fw_err: invalid input\n", 0ULL); - return -1; - } - - /* Read MTL RXQ Operation_Mode Register */ - val = osi_readla(osi_core, - (nveu8_t *)addr + EQOS_MTL_CHX_RX_OP_MODE(qinx)); + void *ioaddr = osi_core->base; + nveu32_t retry = RETRY_COUNT; + nveu32_t count; + nve32_t cond = COND_NOT_MET, ret = 0; + nveu32_t value; - /* fw_err, 1 is for enable and 0 is for disable */ - if (fw_err == OSI_ENABLE) { - /* When fw_err bit is set, all packets except the runt error - * packets are forwarded to the application or DMA. - */ - val |= EQOS_MTL_RXQ_OP_MODE_FEP; - } else if (fw_err == OSI_DISABLE) { - /* When this bit is reset, the Rx queue drops packets with error - * status (CRC error, GMII_ER, watchdog timeout, or overflow) - */ - val &= ~EQOS_MTL_RXQ_OP_MODE_FEP; - } else { - /* Nothing here */ + __sync_val_compare_and_swap(&osi_core->padctrl.is_pad_cal_in_progress, + OSI_DISABLE, OSI_ENABLE); + ret = eqos_pre_pad_calibrate(osi_core); + if (ret < 0) { + ret = -1; + goto error; } - - /* Write to FEP bit of MTL RXQ operation Mode Register to enable or - * disable the forwarding of error packets to DMA or application. + /* 1. 
Set field PAD_E_INPUT_OR_E_PWRD in + * reg ETHER_QOS_SDMEMCOMPPADCTRL_0 */ - eqos_core_safety_writel(osi_core, val, (nveu8_t *)addr + - EQOS_MTL_CHX_RX_OP_MODE(qinx), - EQOS_MTL_CH0_RX_OP_MODE_IDX + qinx); + value = osi_readla(osi_core, (nveu8_t *)ioaddr + EQOS_PAD_CRTL); + value |= EQOS_PAD_CRTL_E_INPUT_OR_E_PWRD; + osi_writela(osi_core, value, (nveu8_t *)ioaddr + EQOS_PAD_CRTL); - return 0; -} + /* 2. delay for 1 to 3 usec */ + osi_core->osd_ops.usleep_range(1, 3); -/** - * @brief eqos_poll_for_swr - Poll for software reset (SWR bit in DMA Mode) - * - * @note - * Algorithm: - * - Waits for SWR reset to be cleared in DMA Mode register for max polling count of 1000. - * - Sleeps for 1 milli sec for each iteration. - * - Refer to EQOS column of <> for API details. - * - TraceID: ETHERNET_NVETHERNETRM_004 - * - * @param[in] osi_core: OSI core private data structure.Used param base, osd_ops.usleep_range. - * - * @pre MAC needs to be out of reset and proper clock configured. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success if reset is success - * @retval -1 on if reset didnot happen in timeout. - */ -static nve32_t eqos_poll_for_swr(struct osi_core_priv_data *const osi_core) -{ - void *addr = osi_core->base; - nveu32_t retry = RETRY_COUNT; - nveu32_t count; - nveu32_t dma_bmr = 0; - nve32_t cond = COND_NOT_MET; - nveu32_t pre_si = osi_core->pre_si; + /* 3. Set AUTO_CAL_ENABLE and AUTO_CAL_START in + * reg ETHER_QOS_AUTO_CAL_CONFIG_0. 
+ * Set pad_auto_cal pd/pu offset values + */ + value = osi_readla(osi_core, + (nveu8_t *)ioaddr + EQOS_PAD_AUTO_CAL_CFG); + value &= ~EQOS_PAD_CRTL_PU_OFFSET_MASK; + value &= ~EQOS_PAD_CRTL_PD_OFFSET_MASK; + value |= osi_core->padctrl.pad_auto_cal_pu_offset; + value |= (osi_core->padctrl.pad_auto_cal_pd_offset << 8U); + value |= EQOS_PAD_AUTO_CAL_CFG_START | + EQOS_PAD_AUTO_CAL_CFG_ENABLE; + osi_writela(osi_core, value, (nveu8_t *)ioaddr + EQOS_PAD_AUTO_CAL_CFG); - if (pre_si == OSI_ENABLE) { - osi_writela(osi_core, OSI_ENABLE, - (nveu8_t *)addr + EQOS_DMA_BMR); - } - /* add delay of 10 usec */ - osi_core->osd_ops.usleep_range(9, 11); + /* 4. Wait on 10 to 12 us before start checking for calibration done. + * This delay is consumed in delay inside while loop. + */ - /* Poll Until Poll Condition */ + /* 5. Wait on AUTO_CAL_ACTIVE until it is 0. 10ms is the timeout */ count = 0; while (cond == COND_NOT_MET) { if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "poll_for_swr: timeout\n", 0ULL); - return -1; + goto calibration_failed; } - count++; - - - dma_bmr = osi_readla(osi_core, - (nveu8_t *)addr + EQOS_DMA_BMR); - if ((dma_bmr & EQOS_DMA_BMR_SWR) != EQOS_DMA_BMR_SWR) { + osi_core->osd_ops.usleep_range(10, 12); + value = osi_readla(osi_core, (nveu8_t *)ioaddr + + EQOS_PAD_AUTO_CAL_STAT); + /* calibration done when CAL_STAT_ACTIVE is zero */ + if ((value & EQOS_PAD_AUTO_CAL_STAT_ACTIVE) == 0U) { cond = COND_MET; - } else { - osi_core->osd_ops.msleep(1U); } } - return 0; +calibration_failed: + /* 6. Re-program the value PAD_E_INPUT_OR_E_PWRD in + * ETHER_QOS_SDMEMCOMPPADCTRL_0 to save power + */ + value = osi_readla(osi_core, (nveu8_t *)ioaddr + EQOS_PAD_CRTL); + value &= ~EQOS_PAD_CRTL_E_INPUT_OR_E_PWRD; + osi_writela(osi_core, value, (nveu8_t *)ioaddr + EQOS_PAD_CRTL); + ret = eqos_post_pad_calibrate(osi_core) < 0 ? 
-1 : ret; +error: + __sync_val_compare_and_swap(&osi_core->padctrl.is_pad_cal_in_progress, + OSI_ENABLE, OSI_DISABLE); + + return ret; } +#else /** - * @brief eqos_set_speed - Set operating speed + * @brief eqos_pad_calibrate - PAD calibration * * @note * Algorithm: - * - Based on the speed (10/100/1000Mbps) MAC will be configured - * accordingly. - * - If invalid value for speed, configure for 1000Mbps. - * - Refer to EQOS column of <> for API details. - * - TraceID: ETHERNET_NVETHERNETRM_012 + * - Set field PAD_E_INPUT_OR_E_PWRD in reg ETHER_QOS_SDMEMCOMPPADCTRL_0 + * - Delay for 1 usec. + * - Set AUTO_CAL_ENABLE and AUTO_CAL_START in reg + * ETHER_QOS_AUTO_CAL_CONFIG_0 + * - Wait on AUTO_CAL_ACTIVE until it is 0 + * - Re-program the value PAD_E_INPUT_OR_E_PWRD in + * ETHER_QOS_SDMEMCOMPPADCTRL_0 to save power + * + * @param[in] osi_core: OSI core private data structure. * - * @param[in] base: EQOS virtual base address. - * @param[in] speed: Operating speed. Valid values are OSI_SPEED_* + * @note + * - MAC should out of reset and clocks enabled. + * - RGMII and MDIO interface needs to be IDLE before performing PAD + * calibration. * * @note * API Group: @@ -635,284 +259,48 @@ static nve32_t eqos_poll_for_swr(struct osi_core_priv_data *const osi_core) * - Run time: Yes * - De-initialization: No * - * @pre MAC should be initialized and started. see osi_start_mac() + * @retval 0 on success + * @retval -1 on failure. 
*/ -static int eqos_set_speed(struct osi_core_priv_data *const osi_core, - const nve32_t speed) +static nve32_t eqos_pad_calibrate(struct osi_core_priv_data *const osi_core) { - nveu32_t mcr_val; - void *base = osi_core->base; + void *ioaddr = osi_core->base; + nveu32_t retry = RETRY_COUNT; + nveu32_t count; + nve32_t cond = COND_NOT_MET, ret = 0; + nveu32_t value; - mcr_val = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_MCR); - switch (speed) { - default: - mcr_val &= ~EQOS_MCR_PS; - mcr_val &= ~EQOS_MCR_FES; - break; - case OSI_SPEED_1000: - mcr_val &= ~EQOS_MCR_PS; - mcr_val &= ~EQOS_MCR_FES; - break; - case OSI_SPEED_100: - mcr_val |= EQOS_MCR_PS; - mcr_val |= EQOS_MCR_FES; - break; - case OSI_SPEED_10: - mcr_val |= EQOS_MCR_PS; - mcr_val &= ~EQOS_MCR_FES; - break; - } - - eqos_core_safety_writel(osi_core, mcr_val, - (unsigned char *)osi_core->base + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); - return 0; -} - -/** - * @brief eqos_set_mode - Set operating mode - * - * @note - * Algorithm: - * - Based on the mode (HALF/FULL Duplex) MAC will be configured - * accordingly. - * - If invalid value for mode, return -1. - * - Refer to EQOS column of <> for API details. - * - TraceID: ETHERNET_NVETHERNETRM_011 - * - * @param[in] osi_core: OSI core private data structure. used param is base. - * @param[in] mode: Operating mode. (OSI_FULL_DUPLEX/OSI_HALF_DUPLEX) - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_set_mode(struct osi_core_priv_data *const osi_core, - const nve32_t mode) -{ - void *base = osi_core->base; - nveu32_t mcr_val; - - mcr_val = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_MCR); - if (mode == OSI_FULL_DUPLEX) { - mcr_val |= EQOS_MCR_DM; - /* DO (disable receive own) bit is not applicable, don't care */ - mcr_val &= ~EQOS_MCR_DO; - } else if (mode == OSI_HALF_DUPLEX) { - mcr_val &= ~EQOS_MCR_DM; - /* Set DO (disable receive own) bit */ - mcr_val |= EQOS_MCR_DO; - } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "set_mode: invalid mode\n", 0ULL); - return -1; - /* Nothing here */ - } - eqos_core_safety_writel(osi_core, mcr_val, - (nveu8_t *)base + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); - return 0; -} - -/** - * @brief eqos_calculate_per_queue_fifo - Calculate per queue FIFO size - * - * @note - * Algorithm: - * - Identify Total Tx/Rx HW FIFO size in KB based on fifo_size - * - Divide the same for each queue. - * - Correct the size to its nearest value of 256B to 32K with next correction value - * which is a 2power(2^x). - * - Correct for 9K and Max of 36K also. - * - i.e if share is >256 and < 512, set it to 256. - * - SWUD_ID: ETHERNET_NVETHERNETRM_006_1 - * - * @param[in] mac_ver: MAC version value. - * @param[in] fifo_size: Total Tx/RX HW FIFO size. - * @param[in] queue_count: Total number of Queues configured. - * - * @pre MAC has to be out of reset. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval Queue size that need to be programmed. 
- */ -static nveu32_t eqos_calculate_per_queue_fifo(nveu32_t mac_ver, - nveu32_t fifo_size, - nveu32_t queue_count) -{ - nveu32_t q_fifo_size = 0; /* calculated fifo size per queue */ - nveu32_t p_fifo = EQOS_256; /* per queue fifo size program value */ - - if (queue_count == 0U) { - return 0U; - } - - /* calculate Tx/Rx fifo share per queue */ - switch (fifo_size) { - case 0: - q_fifo_size = FIFO_SIZE_B(128U); - break; - case 1: - q_fifo_size = FIFO_SIZE_B(256U); - break; - case 2: - q_fifo_size = FIFO_SIZE_B(512U); - break; - case 3: - q_fifo_size = FIFO_SIZE_KB(1U); - break; - case 4: - q_fifo_size = FIFO_SIZE_KB(2U); - break; - case 5: - q_fifo_size = FIFO_SIZE_KB(4U); - break; - case 6: - q_fifo_size = FIFO_SIZE_KB(8U); - break; - case 7: - q_fifo_size = FIFO_SIZE_KB(16U); - break; - case 8: - q_fifo_size = FIFO_SIZE_KB(32U); - break; - case 9: - if (mac_ver == OSI_EQOS_MAC_5_30) { - q_fifo_size = FIFO_SIZE_KB(64U); - } else { - q_fifo_size = FIFO_SIZE_KB(36U); - } - break; - case 10: - q_fifo_size = FIFO_SIZE_KB(128U); - break; - case 11: - q_fifo_size = FIFO_SIZE_KB(256U); - break; - default: - q_fifo_size = FIFO_SIZE_KB(36U); - break; - } - - q_fifo_size = q_fifo_size / queue_count; - - if (q_fifo_size >= FIFO_SIZE_KB(36U)) { - p_fifo = EQOS_36K; - } else if (q_fifo_size >= FIFO_SIZE_KB(32U)) { - p_fifo = EQOS_32K; - } else if (q_fifo_size >= FIFO_SIZE_KB(16U)) { - p_fifo = EQOS_16K; - } else if (q_fifo_size == FIFO_SIZE_KB(9U)) { - p_fifo = EQOS_9K; - } else if (q_fifo_size >= FIFO_SIZE_KB(8U)) { - p_fifo = EQOS_8K; - } else if (q_fifo_size >= FIFO_SIZE_KB(4U)) { - p_fifo = EQOS_4K; - } else if (q_fifo_size >= FIFO_SIZE_KB(2U)) { - p_fifo = EQOS_2K; - } else if (q_fifo_size >= FIFO_SIZE_KB(1U)) { - p_fifo = EQOS_1K; - } else if (q_fifo_size >= FIFO_SIZE_B(512U)) { - p_fifo = EQOS_512; - } else if (q_fifo_size >= FIFO_SIZE_B(256U)) { - p_fifo = EQOS_256; - } else { - /* Nothing here */ - } - - return p_fifo; -} - -#ifdef UPDATED_PAD_CAL -/** - * @brief 
eqos_pad_calibrate - performs PAD calibration - * - * @note - * Algorithm: - * - Set field PAD_E_INPUT_OR_E_PWRD in reg ETHER_QOS_SDMEMCOMPPADCTRL_0 - * - Delay for 1 usec. - * - Set AUTO_CAL_ENABLE and AUTO_CAL_START in reg - * ETHER_QOS_AUTO_CAL_CONFIG_0 - * - Wait on AUTO_CAL_ACTIVE until it is 0 for a loop of 1000 with a sleep of 10 microsecond - * between itertions. - * - Re-program the value PAD_E_INPUT_OR_E_PWRD in - * ETHER_QOS_SDMEMCOMPPADCTRL_0 to save power - * - return 0 if wait for AUTO_CAL_ACTIVE is success else -1. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_013 - * - * @param[in] osi_core: OSI core private data structure. Used param is base, osd_ops.usleep_range. - * - * @pre - * - MAC should out of reset and clocks enabled. - * - RGMII and MDIO interface needs to be IDLE before performing PAD - * calibration. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t eqos_pad_calibrate(struct osi_core_priv_data *const osi_core) -{ - void *ioaddr = osi_core->base; - nveu32_t retry = RETRY_COUNT; - nveu32_t count; - nve32_t cond = COND_NOT_MET, ret = 0; - nveu32_t value; - - __sync_val_compare_and_swap(&osi_core->padctrl.is_pad_cal_in_progress, - OSI_DISABLE, OSI_ENABLE); - ret = eqos_pre_pad_calibrate(osi_core); - if (ret < 0) { - ret = -1; - goto error; - } /* 1. Set field PAD_E_INPUT_OR_E_PWRD in * reg ETHER_QOS_SDMEMCOMPPADCTRL_0 */ value = osi_readla(osi_core, (nveu8_t *)ioaddr + EQOS_PAD_CRTL); value |= EQOS_PAD_CRTL_E_INPUT_OR_E_PWRD; osi_writela(osi_core, value, (nveu8_t *)ioaddr + EQOS_PAD_CRTL); - - /* 2. delay for 1 to 3 usec */ + /* 2. delay for 1 usec */ osi_core->osd_ops.usleep_range(1, 3); - /* 3. Set AUTO_CAL_ENABLE and AUTO_CAL_START in * reg ETHER_QOS_AUTO_CAL_CONFIG_0. 
+ * Set pad_auto_cal pd/pu offset values */ + value = osi_readla(osi_core, (nveu8_t *)ioaddr + EQOS_PAD_AUTO_CAL_CFG); + value &= ~EQOS_PAD_CRTL_PU_OFFSET_MASK; + value &= ~EQOS_PAD_CRTL_PD_OFFSET_MASK; + value |= osi_core->padctrl.pad_auto_cal_pu_offset; + value |= (osi_core->padctrl.pad_auto_cal_pd_offset << 8U); value |= EQOS_PAD_AUTO_CAL_CFG_START | EQOS_PAD_AUTO_CAL_CFG_ENABLE; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)ioaddr + - EQOS_PAD_AUTO_CAL_CFG, - EQOS_PAD_AUTO_CAL_CFG_IDX); + osi_writela(osi_core, value, (nveu8_t *)ioaddr + EQOS_PAD_AUTO_CAL_CFG); - /* 4. Wait on 10 to 12 us before start checking for calibration done. + /* 4. Wait on 1 to 3 us before start checking for calibration done. * This delay is consumed in delay inside while loop. */ - /* 5. Wait on AUTO_CAL_ACTIVE until it is 0. 10ms is the timeout */ count = 0; while (cond == COND_NOT_MET) { if (count > retry) { + ret = -1; goto calibration_failed; } count++; @@ -924,7 +312,6 @@ static nve32_t eqos_pad_calibrate(struct osi_core_priv_data *const osi_core) cond = COND_MET; } } - calibration_failed: /* 6. Re-program the value PAD_E_INPUT_OR_E_PWRD in * ETHER_QOS_SDMEMCOMPPADCTRL_0 to save power @@ -932,455 +319,156 @@ static nve32_t eqos_pad_calibrate(struct osi_core_priv_data *const osi_core) value = osi_readla(osi_core, (nveu8_t *)ioaddr + EQOS_PAD_CRTL); value &= ~EQOS_PAD_CRTL_E_INPUT_OR_E_PWRD; osi_writela(osi_core, value, (nveu8_t *)ioaddr + EQOS_PAD_CRTL); - ret = eqos_post_pad_calibrate(osi_core) < 0 ? -1 : ret; -error: - __sync_val_compare_and_swap(&osi_core->padctrl.is_pad_cal_in_progress, - OSI_ENABLE, OSI_DISABLE); - return ret; } +#endif /* UPDATED_PAD_CAL */ -#else +/** \cond DO_NOT_DOCUMENT */ /** - * @brief eqos_pad_calibrate - PAD calibration + * @brief eqos_configure_mtl_queue - Configure MTL Queue * * @note * Algorithm: - * - Set field PAD_E_INPUT_OR_E_PWRD in reg ETHER_QOS_SDMEMCOMPPADCTRL_0 - * - Delay for 1 usec. 
- * - Set AUTO_CAL_ENABLE and AUTO_CAL_START in reg - * ETHER_QOS_AUTO_CAL_CONFIG_0 - * - Wait on AUTO_CAL_ACTIVE until it is 0 - * - Re-program the value PAD_E_INPUT_OR_E_PWRD in - * ETHER_QOS_SDMEMCOMPPADCTRL_0 to save power + * - This takes care of configuring the below + * parameters for the MTL Queue + * - Mapping MTL Rx queue and DMA Rx channel + * - Flush TxQ + * - Enable Store and Forward mode for Tx, Rx + * - Configure Tx and Rx MTL Queue sizes + * - Configure TxQ weight + * - Enable Rx Queues * - * @param[in] osi_core: OSI core private data structure. + * @param[in] qinx: Queue number that need to be configured. + * @param[in] osi_core: OSI core private data. + * @param[in] tx_fifo: MTL TX queue size for a MTL queue. + * @param[in] rx_fifo: MTL RX queue size for a MTL queue. * - * @note - * - MAC should out of reset and clocks enabled. - * - RGMII and MDIO interface needs to be IDLE before performing PAD - * calibration. + * @pre MAC has to be out of reset. * * @note * API Group: * - Initialization: Yes - * - Run time: Yes + * - Run time: No * - De-initialization: No * * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t eqos_pad_calibrate(struct osi_core_priv_data *const osi_core) +static nve32_t eqos_configure_mtl_queue(struct osi_core_priv_data *const osi_core, + nveu32_t q_inx) { - void *ioaddr = osi_core->base; - nveu32_t retry = RETRY_COUNT; - nveu32_t count; - nve32_t cond = COND_NOT_MET, ret = 0; - nveu32_t value; + const struct core_local *l_core = (struct core_local *)(void *)osi_core; + const nveu32_t rx_fifo_sz[2U][OSI_EQOS_MAX_NUM_QUEUES] = { + { FIFO_SZ(9U), FIFO_SZ(9U), FIFO_SZ(9U), FIFO_SZ(9U), + FIFO_SZ(1U), FIFO_SZ(1U), FIFO_SZ(1U), FIFO_SZ(1U) }, + { FIFO_SZ(36U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), + FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(16U) }, + }; + const nveu32_t tx_fifo_sz[2U][OSI_EQOS_MAX_NUM_QUEUES] = { + { FIFO_SZ(9U), FIFO_SZ(9U), FIFO_SZ(9U), FIFO_SZ(9U), + FIFO_SZ(1U), FIFO_SZ(1U), FIFO_SZ(1U), FIFO_SZ(1U) }, + { FIFO_SZ(8U), FIFO_SZ(8U), FIFO_SZ(8U), FIFO_SZ(8U), + FIFO_SZ(8U), FIFO_SZ(8U), FIFO_SZ(8U), FIFO_SZ(8U) }, + }; + const nveu32_t rfd_rfa[OSI_EQOS_MAX_NUM_QUEUES] = { + FULL_MINUS_16_K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + }; + nveu32_t l_macv = (l_core->l_mac_ver & 0x1U); + nveu32_t que_idx = (q_inx & 0x7U); + nveu32_t rx_fifo_sz_t = 0U; + nveu32_t tx_fifo_sz_t = 0U; + nveu32_t value = 0; + nve32_t ret = 0; - /* 1. Set field PAD_E_INPUT_OR_E_PWRD in - * reg ETHER_QOS_SDMEMCOMPPADCTRL_0 - */ - value = osi_readla(osi_core, (nveu8_t *)ioaddr + EQOS_PAD_CRTL); - value |= EQOS_PAD_CRTL_E_INPUT_OR_E_PWRD; - osi_writela(osi_core, value, (nveu8_t *)ioaddr + EQOS_PAD_CRTL); - /* 2. delay for 1 usec */ - osi_core->osd_ops.usleep_range(1, 3); - /* 3. Set AUTO_CAL_ENABLE and AUTO_CAL_START in - * reg ETHER_QOS_AUTO_CAL_CONFIG_0. 
- */ - value = osi_readla(osi_core, - (nveu8_t *)ioaddr + EQOS_PAD_AUTO_CAL_CFG); - value |= EQOS_PAD_AUTO_CAL_CFG_START | - EQOS_PAD_AUTO_CAL_CFG_ENABLE; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)ioaddr + - EQOS_PAD_AUTO_CAL_CFG, - EQOS_PAD_AUTO_CAL_CFG_IDX); - /* 4. Wait on 1 to 3 us before start checking for calibration done. - * This delay is consumed in delay inside while loop. - */ - /* 5. Wait on AUTO_CAL_ACTIVE until it is 0. 10ms is the timeout */ - count = 0; - while (cond == COND_NOT_MET) { - if (count > retry) { - ret = -1; - goto calibration_failed; - } - count++; - osi_core->osd_ops.usleep_range(10, 12); - value = osi_readla(osi_core, (nveu8_t *)ioaddr + - EQOS_PAD_AUTO_CAL_STAT); - /* calibration done when CAL_STAT_ACTIVE is zero */ - if ((value & EQOS_PAD_AUTO_CAL_STAT_ACTIVE) == 0U) { - cond = COND_MET; - } + tx_fifo_sz_t = tx_fifo_sz[l_macv][que_idx]; + + ret = hw_flush_mtl_tx_queue(osi_core, que_idx); + if (ret < 0) { + goto fail; } -calibration_failed: - /* 6. 
Re-program the value PAD_E_INPUT_OR_E_PWRD in - * ETHER_QOS_SDMEMCOMPPADCTRL_0 to save power + + value = (tx_fifo_sz_t << EQOS_MTL_TXQ_SIZE_SHIFT); + /* Enable Store and Forward mode */ + value |= EQOS_MTL_TSF; + /* Enable TxQ */ + value |= EQOS_MTL_TXQEN; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_CHX_TX_OP_MODE(que_idx)); + + /* read RX Q0 Operating Mode Register */ + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + + EQOS_MTL_CHX_RX_OP_MODE(que_idx)); + + rx_fifo_sz_t = rx_fifo_sz[l_macv][que_idx]; + value |= (rx_fifo_sz_t << EQOS_MTL_RXQ_SIZE_SHIFT); + /* Enable Store and Forward mode */ + value |= EQOS_MTL_RSF; + /* Update EHFL, RFA and RFD + * EHFL: Enable HW Flow Control + * RFA: Threshold for Activating Flow Control + * RFD: Threshold for Deactivating Flow Control */ - value = osi_readla(osi_core, (nveu8_t *)ioaddr + EQOS_PAD_CRTL); - value &= ~EQOS_PAD_CRTL_E_INPUT_OR_E_PWRD; - osi_writela(osi_core, value, (nveu8_t *)ioaddr + EQOS_PAD_CRTL); + value &= ~EQOS_MTL_RXQ_OP_MODE_RFD_MASK; + value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK; + value |= EQOS_MTL_RXQ_OP_MODE_EHFC; + value |= (rfd_rfa[que_idx] << EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) & + EQOS_MTL_RXQ_OP_MODE_RFD_MASK; + value |= (rfd_rfa[que_idx] << EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) & + EQOS_MTL_RXQ_OP_MODE_RFA_MASK; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_CHX_RX_OP_MODE(que_idx)); + + /* Transmit Queue weight */ + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + + EQOS_MTL_TXQ_QW(que_idx)); + value |= EQOS_MTL_TXQ_QW_ISCQW; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_TXQ_QW(que_idx)); + + /* Enable Rx Queue Control */ + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + + EQOS_MAC_RQC0R); + value |= ((osi_core->rxq_ctrl[que_idx] & EQOS_RXQ_EN_MASK) << (que_idx * 2U)); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_RQC0R); + +fail: return ret; } -#endif /* UPDATED_PAD_CAL */ +/** \endcond */ /** - 
* @brief eqos_flush_mtl_tx_queue - Flush MTL Tx queue + * @brief eqos_config_frp - Enable/Disale RX Flexible Receive Parser in HW * - * @note * Algorithm: - * - Validate qinx for maximum value of OSI_EQOS_MAX_NUM_QUEUES and return -1 if fails. - * - Configure EQOS_MTL_CHX_TX_OP_MODE to flush corresponding MTL queue. - * - Wait on EQOS_MTL_QTOMR_FTQ_LPOS bit set for a loop of 1000 with a sleep of - * 1 milli second between itertions. - * - return 0 if EQOS_MTL_QTOMR_FTQ_LPOS is set else -1. - * - SWUD_ID: ETHERNET_NVETHERNETRM_006_2 + * 1) Read the MTL OP Mode configuration register. + * 2) Enable/Disable FRPE bit based on the input. + * 3) Write the MTL OP Mode configuration register. * - * @param[in] osi_core: OSI core private data structure. Used param base, osd_ops.msleep. - * @param[in] qinx: MTL queue index. Max value is OSI_EQOS_MAX_NUM_QUEUES-1. + * @param[in] osi_core: OSI core private data. + * @param[in] enabled: Flag to indicate feature is to be enabled/disabled. * - * @note - * - MAC should out of reset and clocks enabled. - * - hw core initialized. see osi_hw_core_init(). - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_flush_mtl_tx_queue( - struct osi_core_priv_data *const osi_core, - const nveu32_t qinx) -{ - void *addr = osi_core->base; - nveu32_t retry = RETRY_COUNT; - nveu32_t count; - nveu32_t value; - nve32_t cond = COND_NOT_MET; - - if (qinx >= OSI_EQOS_MAX_NUM_QUEUES) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "flush_mtl_tx_queue: invalid input\n", 0ULL); - return -1; - } - - /* Read Tx Q Operating Mode Register and flush TxQ */ - value = osi_readla(osi_core, (nveu8_t *)addr + - EQOS_MTL_CHX_TX_OP_MODE(qinx)); - value |= EQOS_MTL_QTOMR_FTQ; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)addr + - EQOS_MTL_CHX_TX_OP_MODE(qinx), - EQOS_MTL_CH0_TX_OP_MODE_IDX + qinx); - - /* Poll Until FTQ bit resets for Successful Tx Q flush */ - count = 0; - while (cond == COND_NOT_MET) { - if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "Poll FTQ bit timeout\n", 0ULL); - return -1; - } - - count++; - osi_core->osd_ops.msleep(1); - - value = osi_readla(osi_core, (nveu8_t *)addr + - EQOS_MTL_CHX_TX_OP_MODE(qinx)); - - if ((value & EQOS_MTL_QTOMR_FTQ_LPOS) == 0U) { - cond = COND_MET; - } - } - - return 0; -} - -/** - * @brief update_ehfc_rfa_rfd - Update EHFC, RFD and RSA values - * - * @note - * Algorithm: - * - Caculates and stores the RSD (Threshold for Deactivating - * Flow control) and RSA (Threshold for Activating Flow Control) values - * based on the Rx FIFO size and also enables HW flow control. 
- * - Maping detials for rx_fifo are:(minimum EQOS_4K) - * - EQOS_4K, configure FULL_MINUS_2_5K for RFD and FULL_MINUS_1_5K for RFA - * - EQOS_8K, configure FULL_MINUS_4_K for RFD and FULL_MINUS_6_K for RFA - * - EQOS_16K, configure FULL_MINUS_4_K for RFD and FULL_MINUS_10_K for RFA - * - EQOS_32K, configure FULL_MINUS_4_K for RFD and FULL_MINUS_16_K for RFA - * - EQOS_9K/Deafult, configure FULL_MINUS_3_K for RFD and FULL_MINUS_2_K for RFA - * - SWUD_ID: ETHERNET_NVETHERNETRM_006_3 - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @param[in] rx_fifo: Rx FIFO size. - * @param[out] value: Stores RFD and RSA values - */ -void update_ehfc_rfa_rfd(nveu32_t rx_fifo, nveu32_t *value) -{ - if (rx_fifo >= EQOS_4K) { - /* Enable HW Flow Control */ - *value |= EQOS_MTL_RXQ_OP_MODE_EHFC; - - switch (rx_fifo) { - case EQOS_4K: - /* Update RFD */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_2_5K << - EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_1_5K << - EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case EQOS_8K: - /* Update RFD */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_6_K << - EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case EQOS_9K: - /* Update RFD */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_3_K << - EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_2_K << - EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case EQOS_16K: - /* Update RFD */ - *value &= 
~EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_10_K << - EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case EQOS_32K: - /* Update RFD */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_16_K << - EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - break; - default: - /* Use 9K values */ - /* Update RFD */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_3_K << - EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_2_K << - EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT) & - EQOS_MTL_RXQ_OP_MODE_RFA_MASK; - break; - } - } -} - -/** \cond DO_NOT_DOCUMENT */ -/** - * @brief eqos_configure_mtl_queue - Configure MTL Queue - * - * @note - * Algorithm: - * - This takes care of configuring the below - * parameters for the MTL Queue - * - Mapping MTL Rx queue and DMA Rx channel - * - Flush TxQ - * - Enable Store and Forward mode for Tx, Rx - * - Configure Tx and Rx MTL Queue sizes - * - Configure TxQ weight - * - Enable Rx Queues - * - * @param[in] qinx: Queue number that need to be configured. - * @param[in] osi_core: OSI core private data. - * @param[in] tx_fifo: MTL TX queue size for a MTL queue. - * @param[in] rx_fifo: MTL RX queue size for a MTL queue. - * - * @pre MAC has to be out of reset. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No + * @note MAC should be init and started. see osi_start_mac() * * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t eqos_configure_mtl_queue(nveu32_t qinx, - struct osi_core_priv_data *const osi_core, - nveu32_t tx_fifo, - nveu32_t rx_fifo) +static nve32_t eqos_config_frp(struct osi_core_priv_data *const osi_core, + const nveu32_t enabled) { - nveu32_t value = 0; + nveu8_t *base = osi_core->base; + nveu32_t op_mode = 0U, val = 0U; nve32_t ret = 0; - ret = eqos_flush_mtl_tx_queue(osi_core, qinx); - if (ret < 0) { - return ret; - } - - value = (tx_fifo << EQOS_MTL_TXQ_SIZE_SHIFT); - /* Enable Store and Forward mode */ - value |= EQOS_MTL_TSF; - /* Enable TxQ */ - value |= EQOS_MTL_TXQEN; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MTL_CHX_TX_OP_MODE(qinx), - EQOS_MTL_CH0_TX_OP_MODE_IDX + qinx); - - /* read RX Q0 Operating Mode Register */ - value = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_MTL_CHX_RX_OP_MODE(qinx)); - value |= (rx_fifo << EQOS_MTL_RXQ_SIZE_SHIFT); - /* Enable Store and Forward mode */ - value |= EQOS_MTL_RSF; - /* Update EHFL, RFA and RFD - * EHFL: Enable HW Flow Control - * RFA: Threshold for Activating Flow Control - * RFD: Threshold for Deactivating Flow Control - */ - update_ehfc_rfa_rfd(rx_fifo, &value); - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MTL_CHX_RX_OP_MODE(qinx), - EQOS_MTL_CH0_RX_OP_MODE_IDX + qinx); - - /* Transmit Queue weight */ - value = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_MTL_TXQ_QW(qinx)); - value |= (EQOS_MTL_TXQ_QW_ISCQW + qinx); - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MTL_TXQ_QW(qinx), - EQOS_MTL_TXQ0_QW_IDX + qinx); - - /* Enable Rx Queue Control */ - value = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_MAC_RQC0R); - value |= ((osi_core->rxq_ctrl[qinx] & EQOS_RXQ_EN_MASK) << (qinx * 2U)); - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_RQC0R, EQOS_MAC_RQC0R_IDX); - - return 0; -} -/** \endcond */ - -/** - * @brief 
eqos_config_rxcsum_offload - Enable/Disable rx checksum offload in HW - * - * @note - * Algorithm: - * - VAlidate enabled param and return -1 if invalid. - * - Read the MAC configuration register. - * - Enable/disable the IP checksum offload engine COE in MAC receiver based on enabled. - * - Update the MAC configuration register. - * - Refer to OSI column of <> for sequence - * of execution. - * - TraceID:ETHERNET_NVETHERNETRM_017 - * - * @param[in] osi_core: OSI core private data structure. Used param is base. - * @param[in] enabled: Flag to indicate feature is to be enabled(OSI_ENABLE)/disabled(OSI_DISABLE). - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t eqos_config_rxcsum_offload( - struct osi_core_priv_data *const osi_core, - const nveu32_t enabled) -{ - void *addr = osi_core->base; - nveu32_t mac_mcr; - if ((enabled != OSI_ENABLE) && (enabled != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "rxsum_offload: invalid input\n", 0ULL); - return -1; - } - - mac_mcr = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_MCR); - - if (enabled == OSI_ENABLE) { - mac_mcr |= EQOS_MCR_IPC; - } else { - mac_mcr &= ~EQOS_MCR_IPC; - } - - eqos_core_safety_writel(osi_core, mac_mcr, - (nveu8_t *)addr + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); - - return 0; -} - -/** - * @brief eqos_config_frp - Enable/Disale RX Flexible Receive Parser in HW - * - * Algorithm: - * 1) Read the MTL OP Mode configuration register. - * 2) Enable/Disable FRPE bit based on the input. - * 3) Write the MTL OP Mode configuration register. - * - * @param[in] osi_core: OSI core private data. - * @param[in] enabled: Flag to indicate feature is to be enabled/disabled. - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int eqos_config_frp(struct osi_core_priv_data *const osi_core, - const unsigned int enabled) -{ - unsigned char *base = osi_core->base; - unsigned int op_mode = 0U, val = 0U; - int ret = 0; - - if (enabled != OSI_ENABLE && enabled != OSI_DISABLE) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid enable input\n", enabled); - return -1; + ret = -1; + goto done; } /* Disable RE */ @@ -1435,6 +523,7 @@ static int eqos_config_frp(struct osi_core_priv_data *const osi_core, val |= EQOS_MCR_RE; osi_writela(osi_core, val, base + EQOS_MAC_MCR); +done: return ret; } @@ -1444,25 +533,26 @@ static int eqos_config_frp(struct osi_core_priv_data *const osi_core, * Algorithm: * * @param[in] osi_core: OSI core private data. - * @param[in] enabled: Flag to indicate feature is to be enabled/disabled. + * @param[in] nve: Number of Valid Entries. * * @note MAC should be init and started. see osi_start_mac() * * @retval 0 on success * @retval -1 on failure. */ -static int eqos_update_frp_nve(struct osi_core_priv_data *const osi_core, - const unsigned int nve) +static nve32_t eqos_update_frp_nve(struct osi_core_priv_data *const osi_core, + const nveu32_t nve) { - unsigned int val; - unsigned char *base = osi_core->base; + nveu32_t val; + nveu8_t *base = osi_core->base; + nve32_t ret = -1; /* Validate the NVE value */ if (nve >= OSI_FRP_MAX_ENTRY) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid NVE value\n", nve); - return -1; + goto done; } /* Update NVE and NPE in MTL_RXP_Control_Status register */ @@ -1474,7 +564,10 @@ static int eqos_update_frp_nve(struct osi_core_priv_data *const osi_core, val |= ((nve << EQOS_MTL_RXP_CS_NPE_SHIFT) & EQOS_MTL_RXP_CS_NPE); osi_writela(osi_core, val, base + EQOS_MTL_RXP_CS); - return 0; + ret = 0; + +done: + return ret; } /** @@ -1491,13 +584,13 @@ static int eqos_update_frp_nve(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int eqos_frp_write(struct osi_core_priv_data *osi_core, - unsigned int addr, - unsigned int data) +static nve32_t eqos_frp_write(struct osi_core_priv_data *osi_core, + nveu32_t addr, + nveu32_t data) { - int ret = 0; - unsigned char *base = osi_core->base; - unsigned int val = 0U; + nve32_t ret = 0; + nveu8_t *base = osi_core->base; + nveu32_t val = 0U; /* Wait for ready */ ret = osi_readl_poll_timeout((base + EQOS_MTL_RXP_IND_CS), @@ -1511,7 +604,8 @@ static int eqos_frp_write(struct osi_core_priv_data *osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to write\n", val); - return -1; + ret = -1; + goto done; } /* Write data into MTL_RXP_Indirect_Acc_Data */ @@ -1540,9 +634,10 @@ static int eqos_frp_write(struct osi_core_priv_data *osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to write\n", val); - return -1; + ret = -1; } +done: return ret; } @@ -1560,19 +655,20 @@ static int eqos_frp_write(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int eqos_update_frp_entry(struct osi_core_priv_data *const osi_core, - const unsigned int pos, - struct osi_core_frp_data *const data) +static nve32_t eqos_update_frp_entry(struct osi_core_priv_data *const osi_core, + const nveu32_t pos, + struct osi_core_frp_data *const data) { - unsigned int val = 0U, tmp = 0U; - int ret = -1; + nveu32_t val = 0U, tmp = 0U; + nve32_t ret = -1; /* Validate pos value */ if (pos >= OSI_FRP_MAX_ENTRY) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid FRP table entry\n", pos); - return -1; + ret = -1; + goto done; } /** Write Match Data into IE0 **/ @@ -1580,7 +676,8 @@ static int eqos_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = eqos_frp_write(osi_core, EQOS_MTL_FRP_IE0(pos), val); if (ret < 0) { /* Match Data Write fail */ - return -1; + ret = -1; + goto done; } /** Write Match Enable into IE1 **/ @@ -1588,7 +685,8 @@ static int eqos_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = eqos_frp_write(osi_core, EQOS_MTL_FRP_IE1(pos), val); if (ret < 0) { /* Match Enable Write fail */ - return -1; + ret = -1; + goto done; } /** Write AF, RF, IM, NIC, FO and OKI into IE2 **/ @@ -1618,7 +716,8 @@ static int eqos_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = eqos_frp_write(osi_core, EQOS_MTL_FRP_IE2(pos), val); if (ret < 0) { /* FRP IE2 Write fail */ - return -1; + ret = -1; + goto done; } /** Write DCH into IE3 **/ @@ -1626,9 +725,10 @@ static int eqos_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = eqos_frp_write(osi_core, EQOS_MTL_FRP_IE3(pos), val); if (ret < 0) { /* DCH Write fail */ - return -1; + ret = -1; } +done: return ret; } @@ -1697,9 +797,7 @@ static void eqos_configure_rxq_priority( mfix_var2 <<= mfix_var1; val |= (temp & mfix_var2); /* Priorities Selected in the Receive Queue 0 */ - eqos_core_safety_writel(osi_core, val, - (nveu8_t *)osi_core->base + - EQOS_MAC_RQC2R, EQOS_MAC_RQC2R_IDX); + osi_writela(osi_core, val, (nveu8_t 
*)osi_core->base + EQOS_MAC_RQC2R); } } @@ -1717,21 +815,20 @@ static void eqos_configure_rxq_priority( * @retval 0 on success * @retval -1 on failure */ -static int eqos_hsi_configure(struct osi_core_priv_data *const osi_core, +static nve32_t eqos_hsi_configure(struct osi_core_priv_data *const osi_core, const nveu32_t enable) { nveu32_t value; if (enable == OSI_ENABLE) { osi_core->hsi.enabled = OSI_ENABLE; - osi_core->hsi.reporter_id = hsi_err_code[osi_core->instance_id][REPORTER_IDX]; + osi_core->hsi.reporter_id = OSI_HSI_EQOS0_REPORTER_ID; /* T23X-EQOS_HSIv2-19: Enabling of Consistency Monitor for TX Frame Errors */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); value |= EQOS_IMR_TXESIE; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_IMR, EQOS_MAC_IMR_IDX); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); /* T23X-EQOS_HSIv2-1: Enabling of Memory ECC */ value = osi_readla(osi_core, @@ -1747,14 +844,14 @@ static int eqos_hsi_configure(struct osi_core_priv_data *const osi_core, /* T23X-EQOS_HSIv2-5: Enabling and Initialization of Transaction Timeout */ value = (0x198U << EQOS_TMR_SHIFT) & EQOS_TMR_MASK; - value |= (0x2U << EQOS_LTMRMD_SHIFT) & EQOS_LTMRMD_MASK; - value |= (0x1U << EQOS_NTMRMD_SHIFT) & EQOS_NTMRMD_MASK; + value |= ((nveu32_t)0x2U << EQOS_LTMRMD_SHIFT) & EQOS_LTMRMD_MASK; + value |= ((nveu32_t)0x2U << EQOS_NTMRMD_SHIFT) & EQOS_NTMRMD_MASK; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_FSM_ACT_TIMER); /* T23X-EQOS_HSIv2-3: Enabling and Initialization of Watchdog */ /* T23X-EQOS_HSIv2-4: Enabling of Consistency Monitor for FSM States */ - // TODO: enable EQOS_TMOUTEN + /* TODO enable EQOS_TMOUTEN. 
Bug 3584387 */ value = EQOS_PRTYEN; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_FSM_CONTROL); @@ -1798,8 +895,7 @@ static int eqos_hsi_configure(struct osi_core_priv_data *const osi_core, value = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); value &= ~EQOS_IMR_TXESIE; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_IMR, EQOS_MAC_IMR_IDX); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); /* T23X-EQOS_HSIv2-1: Disable of Memory ECC */ value = osi_readla(osi_core, @@ -1845,7 +941,51 @@ static int eqos_hsi_configure(struct osi_core_priv_data *const osi_core, } return 0; } + +/** + * @brief eqos_hsi_inject_err - inject error + * + * @note + * Algorithm: + * - Use error injection method induce error + * + * @param[in, out] osi_core: OSI core private data structure. + * @param[in] type: UE_IDX/CE_IDX + * + * @retval 0 on success + * @retval -1 on failure + */ + +static nve32_t eqos_hsi_inject_err(struct osi_core_priv_data *const osi_core, + const nveu32_t error_code) +{ + nveu32_t value; + nve32_t ret = 0; + + switch (error_code) { + case OSI_HSI_EQOS0_CE_CODE: + value = (EQOS_MTL_DBG_CTL_EIEC | EQOS_MTL_DBG_CTL_EIEE); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + EQOS_MTL_DBG_CTL); + break; + case OSI_HSI_EQOS0_UE_CODE: + value = EQOS_MTL_DPP_ECC_EIC_BLEI; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + EQOS_MTL_DPP_ECC_EIC); + + value = (EQOS_MTL_DBG_CTL_EIEC | EQOS_MTL_DBG_CTL_EIEE); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + EQOS_MTL_DBG_CTL); + break; + default: + ret = hsi_common_error_inject(osi_core, error_code); + break; + } + + return ret; +} #endif + /** * @brief eqos_configure_mac - Configure MAC * @@ -1905,8 +1045,7 @@ static void eqos_configure_mac(struct osi_core_priv_data *const osi_core) /* do nothing for default mtu size */ } - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_MCR, 
EQOS_MAC_MCR_IDX); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_MCR); /* Enable common interrupt at wrapper level */ if (osi_core->mac_ver >= OSI_EQOS_MAC_5_30) { @@ -1933,12 +1072,11 @@ static void eqos_configure_mac(struct osi_core_priv_data *const osi_core) /* Routing Multicast and Broadcast depending on mac version */ value &= ~(EQOS_MAC_RQC1R_MCBCQ); if (osi_core->mac_ver > OSI_EQOS_MAC_5_00) { - value |= EQOS_MAC_RQC1R_MCBCQ7 << EQOS_MAC_RQC1R_MCBCQ_SHIFT; + value |= ((nveu32_t)EQOS_MAC_RQC1R_MCBCQ7) << EQOS_MAC_RQC1R_MCBCQ_SHIFT; } else { - value |= EQOS_MAC_RQC1R_MCBCQ3 << EQOS_MAC_RQC1R_MCBCQ_SHIFT; + value |= ((nveu32_t)EQOS_MAC_RQC1R_MCBCQ3) << EQOS_MAC_RQC1R_MCBCQ_SHIFT; } - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_RQC1R, EQOS_MAC_RQC1R_IDX); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_RQC1R); /* Disable all MMC interrupts */ /* Disable all MMC Tx Interrupts */ @@ -1966,8 +1104,7 @@ static void eqos_configure_mac(struct osi_core_priv_data *const osi_core) /* RGSMIIIE - RGMII/SMII interrupt Enable. * LPIIE is not enabled. 
MMC LPI counters is maintained in HW */ value |= EQOS_IMR_RGSMIIIE; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_IMR, EQOS_MAC_IMR_IDX); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); /* Enable VLAN configuration */ value = osi_readla(osi_core, @@ -1995,6 +1132,7 @@ static void eqos_configure_mac(struct osi_core_priv_data *const osi_core) osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_VLANTIR); +#ifndef OSI_STRIPPED_LIB /* Configure default flow control settings */ if (osi_core->pause_frames != OSI_PAUSE_FRAMES_DISABLE) { osi_core->flow_ctrl = (OSI_FLOW_CTRL_TX | OSI_FLOW_CTRL_RX); @@ -2005,6 +1143,8 @@ static void eqos_configure_mac(struct osi_core_priv_data *const osi_core) 0ULL); } } +#endif /* !OSI_STRIPPED_LIB */ + /* USP (user Priority) to RxQ Mapping, only if DCS not enabled */ if (osi_core->dcs_en != OSI_ENABLE) { eqos_configure_rxq_priority(osi_core); @@ -2047,9 +1187,7 @@ static void eqos_configure_dma(struct osi_core_priv_data *const osi_core) /* AXI Maximum Write Outstanding Request Limit = 31 */ value |= EQOS_DMA_SBUS_WR_OSR_LMT; - eqos_core_safety_writel(osi_core, value, - (nveu8_t *)base + EQOS_DMA_SBUS, - EQOS_DMA_SBUS_IDX); + osi_writela(osi_core, value, (nveu8_t *)base + EQOS_DMA_SBUS); value = osi_readla(osi_core, (nveu8_t *)base + EQOS_DMA_BMR); value |= EQOS_DMA_BMR_DPSW; @@ -2058,201 +1196,16 @@ static void eqos_configure_dma(struct osi_core_priv_data *const osi_core) /** \endcond */ /** - * @brief eqos_enable_mtl_interrupts - Enable MTL interrupts + * @brief Map DMA channels to a specific VM IRQ. * - * Algorithm: enable MTL interrupts for EST + * @param[in] osi_core: OSI private data structure. * - * @param[in] osi_core: OSI core private data structure. + * @note + * Dependencies: OSD layer needs to update number of VM channels and + * DMA channel list in osi_vm_irq_data. + * Protection: None. * - * @note MAC should be init and started. 
see osi_start_mac() - */ -static inline void eqos_enable_mtl_interrupts( - struct osi_core_priv_data *const osi_core) -{ - unsigned int mtl_est_ir = OSI_DISABLE; - void *addr = osi_core->base; - - mtl_est_ir = osi_readla(osi_core, (unsigned char *) - addr + EQOS_MTL_EST_ITRE); - /* enable only MTL interrupt realted to - * Constant Gate Control Error - * Head-Of-Line Blocking due to Scheduling - * Head-Of-Line Blocking due to Frame Size - * BTR Error - * Switch to S/W owned list Complete - */ - mtl_est_ir |= (EQOS_MTL_EST_ITRE_CGCE | EQOS_MTL_EST_ITRE_IEHS | - EQOS_MTL_EST_ITRE_IEHF | EQOS_MTL_EST_ITRE_IEBE | - EQOS_MTL_EST_ITRE_IECC); - osi_writela(osi_core, mtl_est_ir, - (unsigned char *)addr + EQOS_MTL_EST_ITRE); -} - -/** - * @brief eqos_enable_fpe_interrupts - Enable MTL interrupts - * - * Algorithm: enable FPE interrupts - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC should be init and started. see osi_start_mac() - */ -static inline void eqos_enable_fpe_interrupts( - struct osi_core_priv_data *const osi_core) -{ - unsigned int value = OSI_DISABLE; - void *addr = osi_core->base; - - /* Read MAC IER Register and enable Frame Preemption Interrupt - * Enable */ - value = osi_readla(osi_core, (unsigned char *)addr + EQOS_MAC_IMR); - value |= EQOS_IMR_FPEIE; - osi_writela(osi_core, value, (unsigned char *)addr + EQOS_MAC_IMR); -} - -/** - * @brief eqos_save_gcl_params - save GCL configs in local core structure - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC should be init and started. 
see osi_start_mac() - */ -static inline void eqos_save_gcl_params(struct osi_core_priv_data *osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; - unsigned int gcl_widhth[4] = {0, OSI_MAX_24BITS, OSI_MAX_28BITS, - OSI_MAX_32BITS}; - nveu32_t gcl_ti_mask[4] = {0, OSI_MASK_16BITS, OSI_MASK_20BITS, - OSI_MASK_24BITS}; - unsigned int gcl_depthth[6] = {0, OSI_GCL_SIZE_64, OSI_GCL_SIZE_128, - OSI_GCL_SIZE_256, OSI_GCL_SIZE_512, - OSI_GCL_SIZE_1024}; - - if ((osi_core->hw_feature->gcl_width == 0) || - (osi_core->hw_feature->gcl_width > 3)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Wrong HW feature GCL width\n", - (unsigned long long)osi_core->hw_feature->gcl_width); - } else { - l_core->gcl_width_val = - gcl_widhth[osi_core->hw_feature->gcl_width]; - l_core->ti_mask = gcl_ti_mask[osi_core->hw_feature->gcl_width]; - } - - if ((osi_core->hw_feature->gcl_depth == 0) || - (osi_core->hw_feature->gcl_depth > 5)) { - /* Do Nothing */ - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Wrong HW feature GCL depth\n", - (unsigned long long)osi_core->hw_feature->gcl_depth); - } else { - l_core->gcl_dep = gcl_depthth[osi_core->hw_feature->gcl_depth]; - } -} - -/** - * @brief eqos_tsn_init - initialize TSN feature - * - * Algorithm: - * 1) If hardware support EST, - * a) Set default EST configuration - * b) Set enable interrupts - * 2) If hardware supports FPE - * a) Set default FPE configuration - * b) enable interrupts - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] est_sel: EST HW support present or not - * @param[in] fpe_sel: FPE HW support present or not - * - * @note MAC should be init and started. 
see osi_start_mac() - */ -static void eqos_tsn_init(struct osi_core_priv_data *osi_core, - unsigned int est_sel, unsigned int fpe_sel) -{ - unsigned int val = 0x0; - unsigned int temp = 0U; - - if (est_sel == OSI_ENABLE) { - eqos_save_gcl_params(osi_core); - val = osi_readla(osi_core, (unsigned char *)osi_core->base + - EQOS_MTL_EST_CONTROL); - - /* - * PTOV PTP clock period * 6 - * dual-port RAM based asynchronous FIFO controllers or - * Single-port RAM based synchronous FIFO controllers - * CTOV 96 x Tx clock period - * : - * : - * set other default value - */ - val &= ~EQOS_MTL_EST_CONTROL_PTOV; - if (osi_core->pre_si == OSI_ENABLE) { - /* 6*1/(78.6 MHz) in ns*/ - temp = (6U * 13U); - } else { - temp = EQOS_MTL_EST_PTOV_RECOMMEND; - } - temp = temp << EQOS_MTL_EST_CONTROL_PTOV_SHIFT; - val |= temp; - - val &= ~EQOS_MTL_EST_CONTROL_CTOV; - temp = EQOS_MTL_EST_CTOV_RECOMMEND; - temp = temp << EQOS_MTL_EST_CONTROL_CTOV_SHIFT; - val |= temp; - - /*Loop Count to report Scheduling Error*/ - val &= ~EQOS_MTL_EST_CONTROL_LCSE; - val |= EQOS_MTL_EST_CONTROL_LCSE_VAL; - - val &= ~(EQOS_MTL_EST_CONTROL_DDBF | - EQOS_MTL_EST_CONTROL_DFBS); - val |= EQOS_MTL_EST_CONTROL_DDBF; - - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - EQOS_MTL_EST_CONTROL); - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_MTL_EST_OVERHEAD); - val &= ~EQOS_MTL_EST_OVERHEAD_OVHD; - /* As per hardware team recommendation */ - val |= EQOS_MTL_EST_OVERHEAD_RECOMMEND; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - EQOS_MTL_EST_OVERHEAD); - - eqos_enable_mtl_interrupts(osi_core); - } - - if (fpe_sel == OSI_ENABLE) { - val = osi_readla(osi_core, (unsigned char *)osi_core->base + - EQOS_MAC_RQC1R); - val &= ~EQOS_MAC_RQC1R_FPRQ; - temp = osi_core->residual_queue; - temp = temp << EQOS_MAC_RQC1R_FPRQ_SHIFT; - temp = (temp & EQOS_MAC_RQC1R_FPRQ); - val |= temp; - osi_writela(osi_core, val, (unsigned char *)osi_core->base + - EQOS_MAC_RQC1R); - - 
eqos_enable_fpe_interrupts(osi_core); - } - - /* CBS setting for TC should be by user application/IOCTL as - * per requirement */ -} - -/** - * @brief Map DMA channels to a specific VM IRQ. - * - * @param[in] osi_core: OSI private data structure. - * - * @note - * Dependencies: OSD layer needs to update number of VM channels and - * DMA channel list in osi_vm_irq_data. - * Protection: None. - * - * @retval None. + * @retval None. */ static void eqos_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) { @@ -2260,10 +1213,6 @@ static void eqos_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) nveu32_t i, j; nveu32_t chan; - if (osi_core->mac_ver < OSI_EQOS_MAC_5_30) { - return; - } - for (i = 0; i < osi_core->num_vm_irqs; i++) { irq_data = &osi_core->irq_data[i]; for (j = 0; j < irq_data->num_vm_chans; j++) { @@ -2276,7 +1225,7 @@ static void eqos_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) EQOS_VIRT_INTR_APB_CHX_CNTRL(chan)); } osi_writel(OSI_BIT(irq_data->vm_num), - (nveu8_t *)osi_core->base + VIRTUAL_APB_ERR_CTRL); + (nveu8_t *)osi_core->base + VIRTUAL_APB_ERR_CTRL); } } @@ -2292,9 +1241,8 @@ static void eqos_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) * - TraceID:ETHERNET_NVETHERNETRM_006 * * @param[in] osi_core: OSI core private data structure. Used params are - * - base, dcs_en, num_mtl_queues, mtl_queues, mtu, stip_vlan_tag, pause_frames, l3l4_filter_bitmask - * @param[in] tx_fifo_size: MTL TX FIFO size. Max 11. - * @param[in] rx_fifo_size: MTL RX FIFO size. Max 11. + * - base, dcs_en, num_mtl_queues, mtl_queues, mtu, stip_vlan_tag, pause_frames, + * l3l4_filter_bitmask * * @pre * - MAC should be out of reset. See osi_poll_for_mac_reset_complete() @@ -2312,27 +1260,20 @@ static void eqos_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core, - const nveu32_t tx_fifo_size, - const nveu32_t rx_fifo_size) +static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core) { nve32_t ret = 0; nveu32_t qinx = 0; nveu32_t value = 0; nveu32_t value1 = 0; - nveu32_t tx_fifo = 0; - nveu32_t rx_fifo = 0; - - eqos_core_safety_init(osi_core); - eqos_core_backup_init(osi_core); #ifndef UPDATED_PAD_CAL /* PAD calibration */ ret = eqos_pad_calibrate(osi_core); if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "eqos pad calibration failed\n", 0ULL); - return ret; + goto fail; } #endif /* !UPDATED_PAD_CAL */ @@ -2341,6 +1282,7 @@ static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core, (nveu8_t *)osi_core->base + EQOS_MMC_CNTRL); if (osi_core->use_virtualization == OSI_DISABLE) { +#ifndef OSI_STRIPPED_LIB if (osi_core->hv_base != OSI_NULL) { osi_writela(osi_core, EQOS_5_30_ASID_CTRL_VAL, (nveu8_t *)osi_core->hv_base + @@ -2350,6 +1292,7 @@ static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core, (nveu8_t *)osi_core->hv_base + EQOS_AXI_ASID1_CTRL); } +#endif if (osi_core->mac_ver < OSI_EQOS_MAC_5_30) { /* AXI ASID CTRL for channel 0 to 3 */ @@ -2375,45 +1318,37 @@ static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core, value1 = EQOS_RXQ_TO_DMA_CHAN_MAP1; } - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MTL_RXQ_DMA_MAP0, - EQOS_MTL_RXQ_DMA_MAP0_IDX); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_RXQ_DMA_MAP0); if (osi_core->mac_ver >= OSI_EQOS_MAC_5_30) { - eqos_core_safety_writel(osi_core, value1, - (nveu8_t *)osi_core->base + - EQOS_MTL_RXQ_DMA_MAP1, - EQOS_MTL_RXQ_DMA_MAP1_IDX); + osi_writela(osi_core, value1, (nveu8_t *)osi_core->base + EQOS_MTL_RXQ_DMA_MAP1); } if (osi_unlikely(osi_core->num_mtl_queues > OSI_EQOS_MAX_NUM_QUEUES)) { - OSI_CORE_ERR(OSI_NULL, 
OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Number of queues is incorrect\n", 0ULL); - return -1; + ret = -1; + goto fail; } - /* Calculate value of Transmit queue fifo size to be programmed */ - tx_fifo = eqos_calculate_per_queue_fifo(osi_core->mac_ver, - tx_fifo_size, - osi_core->num_mtl_queues); - /* Calculate value of Receive queue fifo size to be programmed */ - rx_fifo = eqos_calculate_per_queue_fifo(osi_core->mac_ver, - rx_fifo_size, - osi_core->num_mtl_queues); - /* Configure MTL Queues */ for (qinx = 0; qinx < osi_core->num_mtl_queues; qinx++) { if (osi_unlikely(osi_core->mtl_queues[qinx] >= OSI_EQOS_MAX_NUM_QUEUES)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Incorrect queues number\n", 0ULL); - return -1; + ret = -1; + goto fail; } - ret = eqos_configure_mtl_queue(osi_core->mtl_queues[qinx], - osi_core, tx_fifo, rx_fifo); + ret = eqos_configure_mtl_queue(osi_core, osi_core->mtl_queues[qinx]); if (ret < 0) { - return ret; + goto fail; } + /* Enable by default to configure forward error packets. 
+ * Since this is a local function this will always return sucess, + * so no need to check for return value + */ + (void)hw_config_fw_err_pkts(osi_core, osi_core->mtl_queues[qinx], OSI_ENABLE); } /* configure EQOS MAC HW */ @@ -2424,15 +1359,17 @@ static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core, /* tsn initialization */ if (osi_core->hw_feature != OSI_NULL) { - eqos_tsn_init(osi_core, osi_core->hw_feature->est_sel, - osi_core->hw_feature->fpe_sel); + hw_tsn_init(osi_core, osi_core->hw_feature->est_sel, + osi_core->hw_feature->fpe_sel); } /* initialize L3L4 Filters variable */ osi_core->l3l4_filter_bitmask = OSI_NONE; - eqos_dma_chan_to_vmirq_map(osi_core); - + if (osi_core->mac_ver >= OSI_EQOS_MAC_5_30) { + eqos_dma_chan_to_vmirq_map(osi_core); + } +fail: return ret; } @@ -2448,11 +1385,11 @@ static nve32_t eqos_core_init(struct osi_core_priv_data *const osi_core, */ static void eqos_handle_mac_fpe_intrs(struct osi_core_priv_data *osi_core) { - unsigned int val = 0; + nveu32_t val = 0; /* interrupt bit clear on read as CSR_SW is reset */ val = osi_readla(osi_core, - (unsigned char *)osi_core->base + EQOS_MAC_FPE_CTS); + (nveu8_t *)osi_core->base + EQOS_MAC_FPE_CTS); if ((val & EQOS_MAC_FPE_CTS_RVER) == EQOS_MAC_FPE_CTS_RVER) { val &= ~EQOS_MAC_FPE_CTS_RVER; @@ -2486,7 +1423,58 @@ static void eqos_handle_mac_fpe_intrs(struct osi_core_priv_data *osi_core) } osi_writela(osi_core, val, - (unsigned char *)osi_core->base + EQOS_MAC_FPE_CTS); + (nveu8_t *)osi_core->base + EQOS_MAC_FPE_CTS); +} + +/** + * @brief eqos_handle_mac_link_intrs + * + * Algorithm: This function takes care of handling the + * MAC link interrupts. + * + * @param[in] osi_core: OSI core private data structure. 
+ * + * @note MAC interrupts need to be enabled + */ +static void eqos_handle_mac_link_intrs(struct osi_core_priv_data *osi_core) +{ + nveu32_t mac_pcs = 0; + nve32_t ret = 0; + + mac_pcs = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_PCS); + /* check whether Link is UP or NOT - if not return. */ + if ((mac_pcs & EQOS_MAC_PCS_LNKSTS) == EQOS_MAC_PCS_LNKSTS) { + /* check for Link mode (full/half duplex) */ + if ((mac_pcs & EQOS_MAC_PCS_LNKMOD) == EQOS_MAC_PCS_LNKMOD) { + ret = hw_set_mode(osi_core, OSI_FULL_DUPLEX); + if (osi_unlikely(ret < 0)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "set mode in full duplex failed\n", 0ULL); + } + } else { + ret = hw_set_mode(osi_core, OSI_HALF_DUPLEX); + if (osi_unlikely(ret < 0)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "set mode in half duplex failed\n", 0ULL); + } + } + + /* set speed at MAC level */ + /* TODO: set_tx_clk needs to be done */ + /* Maybe through workqueue for QNX */ + /* hw_set_speed is treated as void since it is + * an internal functin which will be always success + */ + if ((mac_pcs & EQOS_MAC_PCS_LNKSPEED) == EQOS_MAC_PCS_LNKSPEED_10) { + (void)hw_set_speed(osi_core, OSI_SPEED_10); + } else if ((mac_pcs & EQOS_MAC_PCS_LNKSPEED) == EQOS_MAC_PCS_LNKSPEED_100) { + (void)hw_set_speed(osi_core, OSI_SPEED_100); + } else if ((mac_pcs & EQOS_MAC_PCS_LNKSPEED) == EQOS_MAC_PCS_LNKSPEED_1000) { + (void)hw_set_speed(osi_core, OSI_SPEED_1000); + } else { + /* Nothing here */ + } + } } /** @@ -2501,7 +1489,7 @@ static void eqos_handle_mac_fpe_intrs(struct osi_core_priv_data *osi_core) * - RGMII/SMII MAC interrupt * - If link is down * - Identify speed and mode changes from EQOS_MAC_PCS register and configure the same by calling - * eqos_set_speed(), eqos_set_mode()(proceed even on error for this call) API's. + * hw_set_speed(), hw_set_mode()(proceed even on error for this call) API's. * - SWUD_ID: ETHERNET_NVETHERNETRM_010_1 * * @param[in] osi_core: OSI core private data structure. 
Used param base. @@ -2519,104 +1507,51 @@ static void eqos_handle_mac_intrs(struct osi_core_priv_data *const osi_core, nveu32_t dma_isr) { nveu32_t mac_imr = 0; - nveu32_t mac_pcs = 0; nveu32_t mac_isr = 0; - nve32_t ret = 0; #ifdef HSI_SUPPORT nveu64_t tx_frame_err = 0; #endif - mac_isr = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MAC_ISR); + mac_isr = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_ISR); + /* Handle MAC interrupts */ + if ((dma_isr & EQOS_DMA_ISR_MACIS) == EQOS_DMA_ISR_MACIS) { #ifdef HSI_SUPPORT - if (osi_core->mac_ver >= OSI_EQOS_MAC_5_30) { - /* T23X-EQOS_HSIv2-19: Consistency Monitor for TX Frame */ - if ((dma_isr & EQOS_DMA_ISR_TXSTSIS) == EQOS_DMA_ISR_TXSTSIS) { - osi_core->hsi.tx_frame_err_count = - osi_update_stats_counter(osi_core->hsi.tx_frame_err_count, - 1UL); - tx_frame_err = osi_core->hsi.tx_frame_err_count / - osi_core->hsi.err_count_threshold; - if (osi_core->hsi.tx_frame_err_threshold < tx_frame_err) { - osi_core->hsi.tx_frame_err_threshold = tx_frame_err; - osi_core->hsi.report_count_err[TX_FRAME_ERR_IDX] = OSI_ENABLE; + if (osi_core->mac_ver >= OSI_EQOS_MAC_5_30) { + /* T23X-EQOS_HSIv2-19: Consistency Monitor for TX Frame */ + if ((dma_isr & EQOS_DMA_ISR_TXSTSIS) == EQOS_DMA_ISR_TXSTSIS) { + osi_core->hsi.tx_frame_err_count = + osi_update_stats_counter(osi_core->hsi.tx_frame_err_count, + 1UL); + tx_frame_err = osi_core->hsi.tx_frame_err_count / + osi_core->hsi.err_count_threshold; + if (osi_core->hsi.tx_frame_err_threshold < tx_frame_err) { + osi_core->hsi.tx_frame_err_threshold = tx_frame_err; + osi_core->hsi.report_count_err[TX_FRAME_ERR_IDX] = + OSI_ENABLE; + } + osi_core->hsi.err_code[TX_FRAME_ERR_IDX] = OSI_TX_FRAME_ERR; + osi_core->hsi.report_err = OSI_ENABLE; } - osi_core->hsi.err_code[TX_FRAME_ERR_IDX] = - OSI_TX_FRAME_ERR; - osi_core->hsi.report_err = OSI_ENABLE; } - } #endif - /* Handle MAC interrupts */ - if ((dma_isr & EQOS_DMA_ISR_MACIS) != EQOS_DMA_ISR_MACIS) { - return; - } - - /* handle 
only those MAC interrupts which are enabled */ - mac_imr = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MAC_IMR); - mac_isr = (mac_isr & mac_imr); - - /* RGMII/SMII interrupt */ - if (((mac_isr & EQOS_MAC_ISR_RGSMIIS) != EQOS_MAC_ISR_RGSMIIS) && - ((mac_isr & EQOS_MAC_IMR_FPEIS) != EQOS_MAC_IMR_FPEIS)) { - return; - } - - if (((mac_isr & EQOS_MAC_IMR_FPEIS) == EQOS_MAC_IMR_FPEIS) && - ((mac_imr & EQOS_IMR_FPEIE) == EQOS_IMR_FPEIE)) { - eqos_handle_mac_fpe_intrs(osi_core); - mac_isr &= ~EQOS_MAC_IMR_FPEIS; - } - osi_writela(osi_core, mac_isr, - (nveu8_t *) osi_core->base + EQOS_MAC_ISR); + /* handle only those MAC interrupts which are enabled */ + mac_imr = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); + mac_isr = (mac_isr & mac_imr); - mac_pcs = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MAC_PCS); - /* check whether Link is UP or NOT - if not return. */ - if ((mac_pcs & EQOS_MAC_PCS_LNKSTS) != EQOS_MAC_PCS_LNKSTS) { - return; - } - - /* check for Link mode (full/half duplex) */ - if ((mac_pcs & EQOS_MAC_PCS_LNKMOD) == EQOS_MAC_PCS_LNKMOD) { - ret = eqos_set_mode(osi_core, OSI_FULL_DUPLEX); - if (osi_unlikely(ret < 0)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "set mode in full duplex failed\n", 0ULL); - } - } else { - ret = eqos_set_mode(osi_core, OSI_HALF_DUPLEX); - if (osi_unlikely(ret < 0)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "set mode in half duplex failed\n", 0ULL); + if (((mac_isr & EQOS_MAC_IMR_FPEIS) == EQOS_MAC_IMR_FPEIS) && + ((mac_imr & EQOS_IMR_FPEIE) == EQOS_IMR_FPEIE)) { + eqos_handle_mac_fpe_intrs(osi_core); } - } - /* set speed at MAC level */ - /* TODO: set_tx_clk needs to be done */ - /* Maybe through workqueue for QNX */ - if ((mac_pcs & EQOS_MAC_PCS_LNKSPEED) == EQOS_MAC_PCS_LNKSPEED_10) { - eqos_set_speed(osi_core, OSI_SPEED_10); - } else if ((mac_pcs & EQOS_MAC_PCS_LNKSPEED) == - EQOS_MAC_PCS_LNKSPEED_100) { - eqos_set_speed(osi_core, OSI_SPEED_100); - } else if ((mac_pcs & 
EQOS_MAC_PCS_LNKSPEED) == - EQOS_MAC_PCS_LNKSPEED_1000) { - eqos_set_speed(osi_core, OSI_SPEED_1000); - } else { - /* Nothing here */ + if ((mac_isr & EQOS_MAC_ISR_RGSMIIS) == EQOS_MAC_ISR_RGSMIIS) { + eqos_handle_mac_link_intrs(osi_core); + } } - if (((mac_isr & EQOS_MAC_IMR_FPEIS) == EQOS_MAC_IMR_FPEIS) && - ((mac_imr & EQOS_IMR_FPEIE) == EQOS_IMR_FPEIE)) { - eqos_handle_mac_fpe_intrs(osi_core); - mac_isr &= ~EQOS_MAC_IMR_FPEIS; - } - osi_writela(osi_core, mac_isr, - (unsigned char *)osi_core->base + EQOS_MAC_ISR); + return; } +#ifndef OSI_STRIPPED_LIB /** \cond DO_NOT_DOCUMENT */ /** * @brief update_dma_sr_stats - stats for dma_status error @@ -2642,37 +1577,38 @@ static inline void update_dma_sr_stats( nveu64_t val; if ((dma_sr & EQOS_DMA_CHX_STATUS_RBU) == EQOS_DMA_CHX_STATUS_RBU) { - val = osi_core->xstats.rx_buf_unavail_irq_n[qinx]; - osi_core->xstats.rx_buf_unavail_irq_n[qinx] = + val = osi_core->stats.rx_buf_unavail_irq_n[qinx]; + osi_core->stats.rx_buf_unavail_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & EQOS_DMA_CHX_STATUS_TPS) == EQOS_DMA_CHX_STATUS_TPS) { - val = osi_core->xstats.tx_proc_stopped_irq_n[qinx]; - osi_core->xstats.tx_proc_stopped_irq_n[qinx] = + val = osi_core->stats.tx_proc_stopped_irq_n[qinx]; + osi_core->stats.tx_proc_stopped_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & EQOS_DMA_CHX_STATUS_TBU) == EQOS_DMA_CHX_STATUS_TBU) { - val = osi_core->xstats.tx_buf_unavail_irq_n[qinx]; - osi_core->xstats.tx_buf_unavail_irq_n[qinx] = + val = osi_core->stats.tx_buf_unavail_irq_n[qinx]; + osi_core->stats.tx_buf_unavail_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & EQOS_DMA_CHX_STATUS_RPS) == EQOS_DMA_CHX_STATUS_RPS) { - val = osi_core->xstats.rx_proc_stopped_irq_n[qinx]; - osi_core->xstats.rx_proc_stopped_irq_n[qinx] = + val = osi_core->stats.rx_proc_stopped_irq_n[qinx]; + osi_core->stats.rx_proc_stopped_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & 
EQOS_DMA_CHX_STATUS_RWT) == EQOS_DMA_CHX_STATUS_RWT) { - val = osi_core->xstats.rx_watchdog_irq_n; - osi_core->xstats.rx_watchdog_irq_n = + val = osi_core->stats.rx_watchdog_irq_n; + osi_core->stats.rx_watchdog_irq_n = osi_update_stats_counter(val, 1U); } if ((dma_sr & EQOS_DMA_CHX_STATUS_FBE) == EQOS_DMA_CHX_STATUS_FBE) { - val = osi_core->xstats.fatal_bus_error_irq_n; - osi_core->xstats.fatal_bus_error_irq_n = + val = osi_core->stats.fatal_bus_error_irq_n; + osi_core->stats.fatal_bus_error_irq_n = osi_update_stats_counter(val, 1U); } } /** \endcond */ +#endif /* !OSI_STRIPPED_LIB */ /** * @brief eqos_handle_mtl_intrs - Handle MTL interrupts @@ -2692,37 +1628,37 @@ static inline void update_dma_sr_stats( */ static void eqos_handle_mtl_intrs(struct osi_core_priv_data *osi_core) { - unsigned int val = 0U; - unsigned int sch_err = 0U; - unsigned int frm_err = 0U; - unsigned int temp = 0U; - unsigned int i = 0; - unsigned long stat_val = 0U; - unsigned int value = 0U; + nveu32_t val = 0U; + nveu32_t sch_err = 0U; + nveu32_t frm_err = 0U; + nveu32_t temp = 0U; + nveu32_t i = 0; + nveul64_t stat_val = 0U; + nveu32_t value = 0U; val = osi_readla(osi_core, - (unsigned char *)osi_core->base + EQOS_MTL_EST_STATUS); + (nveu8_t *)osi_core->base + EQOS_MTL_EST_STATUS); val &= (EQOS_MTL_EST_STATUS_CGCE | EQOS_MTL_EST_STATUS_HLBS | EQOS_MTL_EST_STATUS_HLBF | EQOS_MTL_EST_STATUS_BTRE | EQOS_MTL_EST_STATUS_SWLC); /* return if interrupt is not related to EST */ if (val == OSI_DISABLE) { - return; + goto done; } /* increase counter write 1 back will clear */ if ((val & EQOS_MTL_EST_STATUS_CGCE) == EQOS_MTL_EST_STATUS_CGCE) { osi_core->est_ready = OSI_DISABLE; - stat_val = osi_core->tsn_stats.const_gate_ctr_err; - osi_core->tsn_stats.const_gate_ctr_err = + stat_val = osi_core->stats.const_gate_ctr_err; + osi_core->stats.const_gate_ctr_err = osi_update_stats_counter(stat_val, 1U); } if ((val & EQOS_MTL_EST_STATUS_HLBS) == EQOS_MTL_EST_STATUS_HLBS) { osi_core->est_ready = OSI_DISABLE; 
- stat_val = osi_core->tsn_stats.head_of_line_blk_sch; - osi_core->tsn_stats.head_of_line_blk_sch = + stat_val = osi_core->stats.head_of_line_blk_sch; + osi_core->stats.head_of_line_blk_sch = osi_update_stats_counter(stat_val, 1U); /* Need to read MTL_EST_Sch_Error register and cleared */ sch_err = osi_readla(osi_core, (nveu8_t *)osi_core->base + @@ -2731,8 +1667,8 @@ static void eqos_handle_mtl_intrs(struct osi_core_priv_data *osi_core) temp = OSI_ENABLE; temp = temp << i; if ((sch_err & temp) == temp) { - stat_val = osi_core->tsn_stats.hlbs_q[i]; - osi_core->tsn_stats.hlbs_q[i] = + stat_val = osi_core->stats.hlbs_q[i]; + osi_core->stats.hlbs_q[i] = osi_update_stats_counter(stat_val, 1U); } } @@ -2747,7 +1683,7 @@ static void eqos_handle_mtl_intrs(struct osi_core_priv_data *osi_core) value &= ~EQOS_MTL_EST_CONTROL_EEST; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_EST_CONTROL); - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Disabling EST due to HLBS, correct GCL\n", OSI_NONE); } @@ -2755,8 +1691,8 @@ static void eqos_handle_mtl_intrs(struct osi_core_priv_data *osi_core) if ((val & EQOS_MTL_EST_STATUS_HLBF) == EQOS_MTL_EST_STATUS_HLBF) { osi_core->est_ready = OSI_DISABLE; - stat_val = osi_core->tsn_stats.head_of_line_blk_frm; - osi_core->tsn_stats.head_of_line_blk_frm = + stat_val = osi_core->stats.head_of_line_blk_frm; + osi_core->stats.head_of_line_blk_frm = osi_update_stats_counter(stat_val, 1U); /* Need to read MTL_EST_Frm_Size_Error register and cleared */ frm_err = osi_readla(osi_core, (nveu8_t *)osi_core->base + @@ -2765,8 +1701,8 @@ static void eqos_handle_mtl_intrs(struct osi_core_priv_data *osi_core) temp = OSI_ENABLE; temp = temp << i; if ((frm_err & temp) == temp) { - stat_val = osi_core->tsn_stats.hlbf_q[i]; - osi_core->tsn_stats.hlbf_q[i] = + stat_val = osi_core->stats.hlbf_q[i]; + osi_core->stats.hlbf_q[i] = osi_update_stats_counter(stat_val, 1U); } } @@ -2782,7 +1718,7 @@ static 
void eqos_handle_mtl_intrs(struct osi_core_priv_data *osi_core) value &= ~EQOS_MTL_EST_CONTROL_EEST; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_EST_CONTROL); - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Disabling EST due to HLBF, correct GCL\n", OSI_NONE); } @@ -2793,907 +1729,35 @@ static void eqos_handle_mtl_intrs(struct osi_core_priv_data *osi_core) EQOS_MTL_EST_STATUS_BTRE) { osi_core->est_ready = OSI_ENABLE; } - stat_val = osi_core->tsn_stats.sw_own_list_complete; - osi_core->tsn_stats.sw_own_list_complete = + stat_val = osi_core->stats.sw_own_list_complete; + osi_core->stats.sw_own_list_complete = osi_update_stats_counter(stat_val, 1U); } if ((val & EQOS_MTL_EST_STATUS_BTRE) == EQOS_MTL_EST_STATUS_BTRE) { osi_core->est_ready = OSI_DISABLE; - stat_val = osi_core->tsn_stats.base_time_reg_err; - osi_core->tsn_stats.base_time_reg_err = + stat_val = osi_core->stats.base_time_reg_err; + osi_core->stats.base_time_reg_err = osi_update_stats_counter(stat_val, 1U); osi_core->est_ready = OSI_DISABLE; } /* clear EST status register as interrupt is handled */ osi_writela(osi_core, val, (nveu8_t *)osi_core->base + EQOS_MTL_EST_STATUS); -} - -#ifdef HSI_SUPPORT -/** - * @brief eqos_handle_hsi_intr - Handles HSI interrupt. - * - * @note - * Algorithm: - * - Clear HSI interrupt source. - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre MAC should be initialized and started. 
see osi_start_mac() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_handle_hsi_intr(struct osi_core_priv_data *const osi_core) -{ - nveu32_t val = 0U; - nveu32_t val2 = 0U; - nveu64_t ce_count_threshold; - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_WRAP_COMMON_INTR_STATUS); - if (((val & EQOS_REGISTER_PARITY_ERR) == EQOS_REGISTER_PARITY_ERR) || - ((val & EQOS_CORE_UNCORRECTABLE_ERR) == EQOS_CORE_UNCORRECTABLE_ERR)) { - osi_core->hsi.err_code[UE_IDX] = - hsi_err_code[osi_core->instance_id][UE_IDX]; - osi_core->hsi.report_err = OSI_ENABLE; - osi_core->hsi.report_count_err[UE_IDX] = OSI_ENABLE; - /* Disable the interrupt */ - val2 = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_WRAP_COMMON_INTR_ENABLE); - val2 &= ~EQOS_REGISTER_PARITY_ERR; - val2 &= ~EQOS_CORE_UNCORRECTABLE_ERR; - osi_writela(osi_core, val2, (nveu8_t *)osi_core->base + - EQOS_WRAP_COMMON_INTR_ENABLE); - } - if ((val & EQOS_CORE_CORRECTABLE_ERR) == EQOS_CORE_CORRECTABLE_ERR) { - osi_core->hsi.err_code[CE_IDX] = - hsi_err_code[osi_core->instance_id][CE_IDX]; - osi_core->hsi.report_err = OSI_ENABLE; - osi_core->hsi.ce_count = - osi_update_stats_counter(osi_core->hsi.ce_count, 1UL); - ce_count_threshold = osi_core->hsi.ce_count / osi_core->hsi.err_count_threshold; - if (osi_core->hsi.ce_count_threshold < ce_count_threshold) { - osi_core->hsi.ce_count_threshold = ce_count_threshold; - osi_core->hsi.report_count_err[CE_IDX] = OSI_ENABLE; - } - } - val &= ~EQOS_MAC_SBD_INTR; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - EQOS_WRAP_COMMON_INTR_STATUS); - - if (((val & EQOS_CORE_CORRECTABLE_ERR) == EQOS_CORE_CORRECTABLE_ERR) || - ((val & EQOS_CORE_UNCORRECTABLE_ERR) == EQOS_CORE_UNCORRECTABLE_ERR)) { - - /* Clear FSM error status. 
Clear on read */ - (void)osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_MAC_DPP_FSM_INTERRUPT_STATUS); - - /* Clear ECC error status register */ - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_MTL_ECC_INTERRUPT_STATUS); - if (val != 0U) { - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - EQOS_MTL_ECC_INTERRUPT_STATUS); - } - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_DMA_ECC_INTERRUPT_STATUS); - if (val != 0U) { - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - EQOS_DMA_ECC_INTERRUPT_STATUS); - } - } -} -#endif - -/** - * @brief eqos_handle_common_intr - Handles common interrupt. - * - * @note - * Algorithm: - * - Reads DMA ISR register - * - Returns if calue is 0. - * - Handle Non-TI/RI interrupts for all MTL queues and increments #osi_core_priv_data->xstats - * based on error detected per cahnnel. - * - Calls eqos_handle_mac_intrs() to handle MAC interrupts. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_010 - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre MAC should be initialized and started. 
see osi_start_mac() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_handle_common_intr(struct osi_core_priv_data *const osi_core) -{ - void *base = osi_core->base; - nveu32_t dma_isr = 0; - nveu32_t qinx = 0; - nveu32_t i = 0; - nveu32_t dma_sr = 0; - nveu32_t dma_ier = 0; - nveu32_t mtl_isr = 0; - nveu32_t frp_isr = 0U; - - if (osi_core->mac_ver >= OSI_EQOS_MAC_5_30) { - osi_writela(osi_core, EQOS_MAC_SBD_INTR, (nveu8_t *)osi_core->base + - EQOS_WRAP_COMMON_INTR_STATUS); -#ifdef HSI_SUPPORT - if (osi_core->hsi.enabled == OSI_ENABLE) { - eqos_handle_hsi_intr(osi_core); - } -#endif - } - - dma_isr = osi_readla(osi_core, (nveu8_t *)base + EQOS_DMA_ISR); - if (dma_isr == 0U) { - return; - } - - //FIXME Need to check how we can get the DMA channel here instead of - //MTL Queues - if ((dma_isr & EQOS_DMA_CHAN_INTR_STATUS) != 0U) { - /* Handle Non-TI/RI interrupts */ - for (i = 0; i < osi_core->num_mtl_queues; i++) { - qinx = osi_core->mtl_queues[i]; - if (qinx >= OSI_EQOS_MAX_NUM_CHANS) { - continue; - } - - /* read dma channel status register */ - dma_sr = osi_readla(osi_core, (nveu8_t *)base + - EQOS_DMA_CHX_STATUS(qinx)); - /* read dma channel interrupt enable register */ - dma_ier = osi_readla(osi_core, (nveu8_t *)base + - EQOS_DMA_CHX_IER(qinx)); - - /* process only those interrupts which we - * have enabled. 
- */ - dma_sr = (dma_sr & dma_ier); - - /* mask off RI and TI */ - dma_sr &= ~(OSI_BIT(6) | OSI_BIT(0)); - if (dma_sr == 0U) { - continue; - } - - /* ack non ti/ri ints */ - osi_writela(osi_core, dma_sr, (nveu8_t *)base + - EQOS_DMA_CHX_STATUS(qinx)); - update_dma_sr_stats(osi_core, dma_sr, qinx); - } - } - - eqos_handle_mac_intrs(osi_core, dma_isr); - /* Handle MTL inerrupts */ - mtl_isr = osi_readla(osi_core, - (unsigned char *)base + EQOS_MTL_INTR_STATUS); - if (((mtl_isr & EQOS_MTL_IS_ESTIS) == EQOS_MTL_IS_ESTIS) && - ((dma_isr & EQOS_DMA_ISR_MTLIS) == EQOS_DMA_ISR_MTLIS)) { - eqos_handle_mtl_intrs(osi_core); - mtl_isr &= ~EQOS_MTL_IS_ESTIS; - osi_writela(osi_core, mtl_isr, (unsigned char *)base + - EQOS_MTL_INTR_STATUS); - } - - /* Clear FRP Interrupt MTL_RXP_Interrupt_Control_Status */ - frp_isr = osi_readla(osi_core, - (unsigned char *)base + EQOS_MTL_RXP_INTR_CS); - frp_isr |= (EQOS_MTL_RXP_INTR_CS_NVEOVIS | - EQOS_MTL_RXP_INTR_CS_NPEOVIS | - EQOS_MTL_RXP_INTR_CS_FOOVIS | - EQOS_MTL_RXP_INTR_CS_PDRFIS); - osi_writela(osi_core, frp_isr, - (unsigned char *)base + EQOS_MTL_RXP_INTR_CS); -} - -/** - * @brief eqos_start_mac - Start MAC Tx/Rx engine - * - * @note - * Algorithm: - * - Enable MAC Transmitter and Receiver in EQOS_MAC_MCR_IDX - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_008 - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre - * - MAC init should be complete. 
See osi_hw_core_init() and - * osi_hw_dma_init() - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_start_mac(struct osi_core_priv_data *const osi_core) -{ - nveu32_t value; - void *addr = osi_core->base; - - value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_MCR); - /* Enable MAC Transmit */ - /* Enable MAC Receive */ - value |= EQOS_MCR_TE | EQOS_MCR_RE; - eqos_core_safety_writel(osi_core, value, - (nveu8_t *)addr + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); -} - -/** - * @brief eqos_stop_mac - Stop MAC Tx/Rx engine - * - * @note - * Algorithm: - * - Disable MAC Transmitter and Receiver in EQOS_MAC_MCR_IDX - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_007 - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre MAC DMA deinit should be complete. See osi_hw_dma_deinit() - * - * @note - * API Group: - * - Initialization: No - * - Run time: No - * - De-initialization: Yes - */ -static void eqos_stop_mac(struct osi_core_priv_data *const osi_core) -{ - nveu32_t value; - void *addr = osi_core->base; - - value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_MCR); - /* Disable MAC Transmit */ - /* Disable MAC Receive */ - value &= ~EQOS_MCR_TE; - value &= ~EQOS_MCR_RE; - eqos_core_safety_writel(osi_core, value, - (nveu8_t *)addr + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); -} - -#ifdef MACSEC_SUPPORT -/** - * @brief eqos_config_mac_tx - Enable/Disable MAC Tx - * - * @note - * Algorithm: - * - Enable or Disables MAC Transmitter - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] enable: Enable or Disable.MAC Tx - * - * @pre MAC init should be complete. 
See osi_hw_core_init() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_config_mac_tx(struct osi_core_priv_data *const osi_core, - const nveu32_t enable) -{ - nveu32_t value; - void *addr = osi_core->base; - - if (enable == OSI_ENABLE) { - value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_MCR); - /* Enable MAC Transmit */ - value |= EQOS_MCR_TE; - eqos_core_safety_writel(osi_core, value, - (nveu8_t *)addr + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); - } else { - value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_MCR); - /* Disable MAC Transmit */ - value &= ~EQOS_MCR_TE; - eqos_core_safety_writel(osi_core, value, - (nveu8_t *)addr + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); - } -} -#endif /* MACSEC_SUPPORT */ - -/** - * @brief eqos_config_l2_da_perfect_inverse_match - configure register for - * inverse or perfect match. - * - * @note - * Algorithm: - * - use perfect_inverse_match filed to set perfect/inverse matching for L2 DA. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_018 - * - * @param[in] base: Base address from OSI core private data structure. - * @param[in] perfect_inverse_match: OSI_INV_MATCH - inverse mode else - perfect mode - * - * @pre MAC should be initialized and started. 
see osi_start_mac() - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 always - */ -static inline nve32_t eqos_config_l2_da_perfect_inverse_match( - struct osi_core_priv_data *const osi_core, - nveu32_t perfect_inverse_match) -{ - nveu32_t value = 0U; - - value = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MAC_PFR); - value &= ~EQOS_MAC_PFR_DAIF; - if (perfect_inverse_match == OSI_INV_MATCH) { - value |= EQOS_MAC_PFR_DAIF; - } - eqos_core_safety_writel(osi_core, value, - (nveu8_t *)osi_core->base + EQOS_MAC_PFR, - EQOS_MAC_PFR_IDX); - - return 0; -} - -/** - * @brief eqos_config_mac_pkt_filter_reg - configure mac filter register. - * - * @note - * - This sequence is used to configure MAC in different pkt - * processing modes like promiscuous, multicast, unicast, - * hash unicast/multicast based on input filter arguments. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_018 - * - * @param[in] osi_core: OSI core private data structure. Used param base. - * @param[in] filter: OSI filter structure. used param oper_mode. - * - * @pre MAC should be initialized and started. 
see osi_start_mac() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 always - */ -static nve32_t eqos_config_mac_pkt_filter_reg( - struct osi_core_priv_data *const osi_core, - const struct osi_filter *filter) -{ - nveu32_t value = 0U; - nve32_t ret = 0; - - value = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_PFR); - - /*Retain all other values */ - value &= (EQOS_MAC_PFR_DAIF | EQOS_MAC_PFR_DBF | EQOS_MAC_PFR_SAIF | - EQOS_MAC_PFR_SAF | EQOS_MAC_PFR_PCF | EQOS_MAC_PFR_VTFE | - EQOS_MAC_PFR_IPFE | EQOS_MAC_PFR_DNTU | EQOS_MAC_PFR_RA); - - if ((filter->oper_mode & OSI_OPER_EN_PROMISC) != OSI_DISABLE) { - value |= EQOS_MAC_PFR_PR; - } - - if ((filter->oper_mode & OSI_OPER_DIS_PROMISC) != OSI_DISABLE) { - value &= ~EQOS_MAC_PFR_PR; - } - - if ((filter->oper_mode & OSI_OPER_EN_ALLMULTI) != OSI_DISABLE) { - value |= EQOS_MAC_PFR_PM; - } - - if ((filter->oper_mode & OSI_OPER_DIS_ALLMULTI) != OSI_DISABLE) { - value &= ~EQOS_MAC_PFR_PM; - } - - if ((filter->oper_mode & OSI_OPER_EN_PERFECT) != OSI_DISABLE) { - value |= EQOS_MAC_PFR_HPF; - } - - if ((filter->oper_mode & OSI_OPER_DIS_PERFECT) != OSI_DISABLE) { - value &= ~EQOS_MAC_PFR_HPF; - } - - - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_PFR, EQOS_MAC_PFR_IDX); - - if ((filter->oper_mode & OSI_OPER_EN_L2_DA_INV) != OSI_DISABLE) { - ret = eqos_config_l2_da_perfect_inverse_match(osi_core, - OSI_INV_MATCH); - } - - if ((filter->oper_mode & OSI_OPER_DIS_L2_DA_INV) != OSI_DISABLE) { - ret = eqos_config_l2_da_perfect_inverse_match(osi_core, - OSI_PFT_MATCH); - } - - return ret; -} - -/** - * @brief eqos_update_mac_addr_helper - Function to update DCS and MBC; helper function for - * eqos_update_mac_addr_low_high_reg() - * - * @note - * Algorithm: - * - Validation of dma_chan if dma_routing_enable is OSI_ENABLE and addr_mask - * - corresponding sections not updated if invalid. 
- * - This helper routine is to update value parameter based on DCS and MBC - * sections of L2 register. - * dsc_en status performed before updating DCS bits. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_018 - * - * @param[in] osi_core: OSI core private data structure. Used param base. - * @param[out] value: nveu32_t pointer which has value read from register. - * @param[in] idx: Refer #osi_filter->index for details. - * @param[in] dma_routing_enable: Refer #osi_filter->dma_routing for details. - * @param[in] dma_chan: Refer #osi_filter->dma_chan for details. - * @param[in] addr_mask: Refer #osi_filter->addr_mask for details. - * - * @pre - * - MAC should be initialized and started. see osi_start_mac() - * - osi_core->osd should be populated. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static inline nve32_t eqos_update_mac_addr_helper( - const struct osi_core_priv_data *osi_core, - nveu32_t *value, - const nveu32_t idx, - const nveu32_t dma_chan, - const nveu32_t addr_mask, - OSI_UNUSED const nveu32_t src_dest) -{ - nveu32_t temp; - - /* PDC bit of MAC_Ext_Configuration register is set so binary - * value representation form index 32-127 else hot-bit - * representation. 
- */ - if ((idx < EQOS_MAX_MAC_ADDR_REG) && - (osi_core->mac_ver >= OSI_EQOS_MAC_5_00)) { - *value &= EQOS_MAC_ADDRH_DCS; - temp = OSI_BIT(dma_chan); - temp = temp << EQOS_MAC_ADDRH_DCS_SHIFT; - temp = temp & EQOS_MAC_ADDRH_DCS; - *value = *value | temp; - } else { - *value = OSI_DISABLE; - temp = dma_chan; - temp = temp << EQOS_MAC_ADDRH_DCS_SHIFT; - temp = temp & EQOS_MAC_ADDRH_DCS; - *value = temp; - } - - /* Address mask is valid for address 1 to 31 index only */ - if ((addr_mask <= EQOS_MAX_MASK_BYTE) && - (addr_mask > OSI_AMASK_DISABLE)) { - if ((idx > 0U) && (idx < EQOS_MAX_MAC_ADDR_REG)) { - *value = (*value | - ((addr_mask << EQOS_MAC_ADDRH_MBC_SHIFT) & - EQOS_MAC_ADDRH_MBC)); - } else { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "invalid address index for MBC\n", - 0ULL); - return -1; - } - } - - return 0; -} - -/** - * @brief eqos_l2_filter_delete - Function to delete L2 filter - * - * @note - * Algorithm: - * - This helper routine is to delete L2 filter based on DCS and MBC - * parameter. - * - Handling for EQOS mac version 4.10 differently. - * - * @param[in] osi_core: OSI core private data structure. - * @param[out] value: nveu32_t pointer which has value read from register. - * @param[in] idx: filter index - * @param[in] dma_routing_enable: dma channel routing enable(1) - * @param[in] dma_chan: dma channel number - * - * @pre - * - MAC should be initialized and started. see osi_start_mac() - * - osi_core->osd should be populated. 
- * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_l2_filter_delete(struct osi_core_priv_data *osi_core, - nveu32_t *value, - const nveu32_t idx, - const nveu32_t dma_routing_enable, - const nveu32_t dma_chan) -{ - nveu32_t dcs_check = *value; - nveu32_t temp = OSI_DISABLE; - - osi_writela(osi_core, OSI_MAX_32BITS, - (nveu8_t *)osi_core->base + EQOS_MAC_ADDRL((idx))); - - *value |= OSI_MASK_16BITS; - if (dma_routing_enable == OSI_DISABLE || - osi_core->mac_ver < OSI_EQOS_MAC_5_00) { - *value &= ~(EQOS_MAC_ADDRH_AE | EQOS_MAC_ADDRH_DCS); - osi_writela(osi_core, *value, (nveu8_t *)osi_core->base + - EQOS_MAC_ADDRH((idx))); - return; - } - - dcs_check &= EQOS_MAC_ADDRH_DCS; - dcs_check = dcs_check >> EQOS_MAC_ADDRH_DCS_SHIFT; - - if (idx >= EQOS_MAX_MAC_ADDR_REG) { - dcs_check = OSI_DISABLE; - } else { - temp = OSI_BIT(dma_chan); - dcs_check &= ~(temp); - } - - if (dcs_check == OSI_DISABLE) { - *value &= ~(EQOS_MAC_ADDRH_AE | EQOS_MAC_ADDRH_DCS); - osi_writela(osi_core, *value, (nveu8_t *)osi_core->base + - EQOS_MAC_ADDRH((idx))); - } else { - *value &= ~(EQOS_MAC_ADDRH_DCS); - *value |= (dcs_check << EQOS_MAC_ADDRH_DCS_SHIFT); - osi_writela(osi_core, *value, (nveu8_t *)osi_core->base + - EQOS_MAC_ADDRH((idx))); - } - - return; -} - -/** - * @brief eqos_update_mac_addr_low_high_reg- Update L2 address in filter - * register - * - * @note - * Algorithm: - * - This routine validates index and addr of #osi_filter. - * - calls eqos_update_mac_addr_helper() to update DCS and MBS. - * dsc_en status performed before updating DCS bits. - * - Update MAC address to L2 filter register. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_018 - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] filter: OSI filter structure. - * - * @pre - * - MAC should be initialized and started. see osi_start_mac() - * - osi_core->osd should be populated. 
- * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t eqos_update_mac_addr_low_high_reg( - struct osi_core_priv_data *const osi_core, - const struct osi_filter *filter) -{ - nveu32_t idx = filter->index; - nveu32_t dma_routing_enable = filter->dma_routing; - nveu32_t dma_chan = filter->dma_chan; - nveu32_t addr_mask = filter->addr_mask; - nveu32_t src_dest = filter->src_dest; - nveu32_t value = OSI_DISABLE; - nve32_t ret = 0; - - if ((idx > (EQOS_MAX_MAC_ADDRESS_FILTER - 0x1U)) || - (dma_chan >= OSI_EQOS_MAX_NUM_CHANS)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "invalid MAC filter index or channel number\n", - 0ULL); - return -1; - } - - /* read current value at index preserve DCS current value */ - value = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_MAC_ADDRH((idx))); - - /* High address reset DCS and AE bits*/ - if ((filter->oper_mode & OSI_OPER_ADDR_DEL) != OSI_NONE) { - eqos_l2_filter_delete(osi_core, &value, idx, dma_routing_enable, - dma_chan); - return 0; - } - - ret = eqos_update_mac_addr_helper(osi_core, &value, idx, dma_chan, - addr_mask, src_dest); - /* Check return value from helper code */ - if (ret == -1) { - return ret; - } - - /* Update AE bit if OSI_OPER_ADDR_UPDATE is set */ - if ((filter->oper_mode & OSI_OPER_ADDR_UPDATE) == - OSI_OPER_ADDR_UPDATE) { - value |= EQOS_MAC_ADDRH_AE; - } - - /* Setting Source/Destination Address match valid for 1 to 32 index */ - if (((idx > 0U) && (idx < EQOS_MAX_MAC_ADDR_REG)) && - (src_dest <= OSI_SA_MATCH)) { - value = (value | ((src_dest << EQOS_MAC_ADDRH_SA_SHIFT) & - EQOS_MAC_ADDRH_SA)); - } - - osi_writela(osi_core, ((nveu32_t)filter->mac_address[4] | - ((nveu32_t)filter->mac_address[5] << 8) | value), - (nveu8_t *)osi_core->base + EQOS_MAC_ADDRH((idx))); - - osi_writela(osi_core, ((nveu32_t)filter->mac_address[0] | - ((nveu32_t)filter->mac_address[1] << 8) | - 
((nveu32_t)filter->mac_address[2] << 16) | - ((nveu32_t)filter->mac_address[3] << 24)), - (nveu8_t *)osi_core->base + EQOS_MAC_ADDRL((idx))); - - return ret; -} - -/** - * @brief eqos_config_ptp_offload - Enable/Disable PTP offload - * - * Algorithm: Based on input argument, update PTO and TSCR registers. - * Update ptp_filter for TSCR register. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] pto_config: The PTP Offload configuration from function - * driver. - * - * @note 1) MAC should be init and started. see osi_start_mac() - * 2) configure_ptp() should be called after this API - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int eqos_config_ptp_offload(struct osi_core_priv_data *osi_core, - struct osi_pto_config *const pto_config) -{ - unsigned char *addr = (unsigned char *)osi_core->base; - int ret = 0; - unsigned int value = 0x0U; - unsigned int ptc_value = 0x0U; - unsigned int port_id = 0x0U; - - /* Read MAC TCR */ - value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_TCR); - /* clear old configuration */ - value &= ~(EQOS_MAC_TCR_TSENMACADDR | OSI_MAC_TCR_SNAPTYPSEL_3 | - OSI_MAC_TCR_TSMASTERENA | OSI_MAC_TCR_TSEVENTENA | - OSI_MAC_TCR_TSENA | OSI_MAC_TCR_TSCFUPDT | - OSI_MAC_TCR_TSCTRLSSR | OSI_MAC_TCR_TSVER2ENA | - OSI_MAC_TCR_TSIPENA); - - /** Handle PTO disable */ - if (pto_config->en_dis == OSI_DISABLE) { - osi_core->ptp_config.ptp_filter = value; - osi_writela(osi_core, ptc_value, addr + EQOS_MAC_PTO_CR); - eqos_core_safety_writel(osi_core, value, addr + - EQOS_MAC_TCR, EQOS_MAC_TCR_IDX); - osi_writela(osi_core, OSI_NONE, addr + EQOS_MAC_PIDR0); - osi_writela(osi_core, OSI_NONE, addr + EQOS_MAC_PIDR1); - osi_writela(osi_core, OSI_NONE, addr + EQOS_MAC_PIDR2); - return 0; - } - - /** Handle PTO enable */ - /* Set PTOEN bit */ - ptc_value |= EQOS_MAC_PTO_CR_PTOEN; - ptc_value |= ((pto_config->domain_num << EQOS_MAC_PTO_CR_DN_SHIFT) - & EQOS_MAC_PTO_CR_DN); - /* Set TSCR register flag */ - value |= 
(OSI_MAC_TCR_TSENA | OSI_MAC_TCR_TSCFUPDT | - OSI_MAC_TCR_TSCTRLSSR | OSI_MAC_TCR_TSVER2ENA | - OSI_MAC_TCR_TSIPENA); - - if (pto_config->snap_type > 0U) { - /* Set APDREQEN bit if snap_type > 0 */ - ptc_value |= EQOS_MAC_PTO_CR_APDREQEN; - } - - /* Set SNAPTYPSEL for Taking Snapshots mode */ - value |= ((pto_config->snap_type << EQOS_MAC_TCR_SNAPTYPSEL_SHIFT) & - OSI_MAC_TCR_SNAPTYPSEL_3); - - /* Set/Reset TSMSTRENA bit for Master/Slave */ - if (pto_config->master == OSI_ENABLE) { - /* Set TSMSTRENA bit for master */ - value |= OSI_MAC_TCR_TSMASTERENA; - if (pto_config->snap_type != OSI_PTP_SNAP_P2P) { - /* Set ASYNCEN bit on PTO Control Register */ - ptc_value |= EQOS_MAC_PTO_CR_ASYNCEN; - } - } else { - /* Reset TSMSTRENA bit for slave */ - value &= ~OSI_MAC_TCR_TSMASTERENA; - } - - /* Set/Reset TSENMACADDR bit for UC/MC MAC */ - if (pto_config->mc_uc == OSI_ENABLE) { - /* Set TSENMACADDR bit for MC/UC MAC PTP filter */ - value |= EQOS_MAC_TCR_TSENMACADDR; - } else { - /* Reset TSENMACADDR bit */ - value &= ~EQOS_MAC_TCR_TSENMACADDR; - } - - /* Set TSEVENTENA bit for PTP events */ - value |= OSI_MAC_TCR_TSEVENTENA; - osi_core->ptp_config.ptp_filter = value; - /** Write PTO_CR and TCR registers */ - osi_writela(osi_core, ptc_value, addr + EQOS_MAC_PTO_CR); - eqos_core_safety_writel(osi_core, value, addr + EQOS_MAC_TCR, - EQOS_MAC_TCR_IDX); - /* Port ID for PTP offload packet created */ - port_id = pto_config->portid & EQOS_MAC_PIDR_PID_MASK; - osi_writela(osi_core, port_id, addr + EQOS_MAC_PIDR0); - osi_writela(osi_core, OSI_NONE, addr + EQOS_MAC_PIDR1); - osi_writela(osi_core, OSI_NONE, addr + EQOS_MAC_PIDR2); - - return ret; -} - -/** - * @brief eqos_config_l3_l4_filter_enable - register write to enable L3/L4 - * filters. - * - * @note - * Algorithm: - * - This routine to update filter_enb_dis value in IP filter enable register. - * - TraceID:ETHERNET_NVETHERNETRM_019 - * - * @param[in] osi_core: OSI core private data. 
- * @param[in] filter_enb_dis: enable/disable - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t eqos_config_l3_l4_filter_enable( - struct osi_core_priv_data *const osi_core, - const nveu32_t filter_enb_dis) -{ - nveu32_t value = 0U; - void *base = osi_core->base; - value = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_PFR); - value &= ~(EQOS_MAC_PFR_IPFE); - value |= ((filter_enb_dis << EQOS_MAC_PFR_IPFE_SHIFT) & - EQOS_MAC_PFR_IPFE); - eqos_core_safety_writel(osi_core, value, (nveu8_t *)base + EQOS_MAC_PFR, - EQOS_MAC_PFR_IDX); - - return 0; -} - -/** - * @brief eqos_update_ip4_addr - configure register for IPV4 address filtering - * - * @note - * Algorithm: - * - Validate addr for null, filter_no for max value and return -1 on failure. - * - Update IPv4 source/destination address for L3 layer filtering. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_019 - * - * @param[in] osi_core: OSI core private data structure. Used param base. - * @param[in] filter_no: filter index. Refer #osi_l3_l4_filter->filter_no for details. - * @param[in] addr: ipv4 address. Refer #osi_l3_l4_filter->ip4_addr for details. - * @param[in] src_dst_addr_match: Refer #osi_l3_l4_filter->src_dst_addr_match for details. - * - * @pre 1) MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_update_ip4_addr(struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu8_t addr[], - const nveu32_t src_dst_addr_match) -{ - void *base = osi_core->base; - nveu32_t value = 0U; - nveu32_t temp = 0U; - - if (addr == OSI_NULL) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "invalid address\n", 0ULL); - return -1; - } - - if (filter_no > (EQOS_MAX_L3_L4_FILTER - 0x1U)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (nveul64_t)filter_no); - return -1; - } - - value = addr[3]; - temp = (nveu32_t)addr[2] << 8; - value |= temp; - temp = (nveu32_t)addr[1] << 16; - value |= temp; - temp = (nveu32_t)addr[0] << 24; - value |= temp; - if (src_dst_addr_match == OSI_SOURCE_MATCH) { - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3_AD0R(filter_no)); - } else { - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3_AD1R(filter_no)); - } - return 0; +done: + return; } +#ifdef HSI_SUPPORT /** - * @brief eqos_update_ip6_addr - add ipv6 address in register + * @brief eqos_handle_hsi_intr - Handles HSI interrupt. * * @note * Algorithm: - * - Validate addr for null, filter_no for max value and return -1 on failure. - * - Update IPv6 source/destination address for L3 layer filtering. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_019 + * - Clear HSI interrupt source. * - * @param[in] osi_core: OSI core private data structure. Used param base. - * @param[in] filter_no: filter index. Refer #osi_l3_l4_filter->filter_no for details. - * @param[in] addr: ipv4 address. Refer #osi_l3_l4_filter->ip6_addr for details. + * @param[in] osi_core: OSI core private data structure. * * @pre MAC should be initialized and started. 
see osi_start_mac() * @@ -3702,235 +1766,235 @@ static nve32_t eqos_update_ip4_addr(struct osi_core_priv_data *const osi_core, * - Initialization: No * - Run time: Yes * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. */ -static nve32_t eqos_update_ip6_addr(struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu16_t addr[]) +static void eqos_handle_hsi_intr(struct osi_core_priv_data *const osi_core) { - void *base = osi_core->base; - nveu32_t value = 0U; - nveu32_t temp = 0U; + nveu32_t val = 0U; + nveu32_t val2 = 0U; + nveu64_t ce_count_threshold; - if (addr == OSI_NULL) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "invalid address\n", 0ULL); - return -1; + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + EQOS_WRAP_COMMON_INTR_STATUS); + if (((val & EQOS_REGISTER_PARITY_ERR) == EQOS_REGISTER_PARITY_ERR) || + ((val & EQOS_CORE_UNCORRECTABLE_ERR) == EQOS_CORE_UNCORRECTABLE_ERR)) { + osi_core->hsi.err_code[UE_IDX] = OSI_HSI_EQOS0_UE_CODE; + osi_core->hsi.report_err = OSI_ENABLE; + osi_core->hsi.report_count_err[UE_IDX] = OSI_ENABLE; + /* Disable the interrupt */ + val2 = osi_readla(osi_core, (nveu8_t *)osi_core->base + + EQOS_WRAP_COMMON_INTR_ENABLE); + val2 &= ~EQOS_REGISTER_PARITY_ERR; + val2 &= ~EQOS_CORE_UNCORRECTABLE_ERR; + osi_writela(osi_core, val2, (nveu8_t *)osi_core->base + + EQOS_WRAP_COMMON_INTR_ENABLE); } - - if (filter_no > (EQOS_MAX_L3_L4_FILTER - 0x1U)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "invalid filter index for L3/L4 filter\n", - (nveul64_t)filter_no); - return -1; + if ((val & EQOS_CORE_CORRECTABLE_ERR) == EQOS_CORE_CORRECTABLE_ERR) { + osi_core->hsi.err_code[CE_IDX] = OSI_HSI_EQOS0_CE_CODE; + osi_core->hsi.report_err = OSI_ENABLE; + osi_core->hsi.ce_count = + osi_update_stats_counter(osi_core->hsi.ce_count, 1UL); + ce_count_threshold = osi_core->hsi.ce_count / osi_core->hsi.err_count_threshold; + if (osi_core->hsi.ce_count_threshold < 
ce_count_threshold) { + osi_core->hsi.ce_count_threshold = ce_count_threshold; + osi_core->hsi.report_count_err[CE_IDX] = OSI_ENABLE; + } } + val &= ~EQOS_MAC_SBD_INTR; + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + EQOS_WRAP_COMMON_INTR_STATUS); - /* update Bits[31:0] of 128-bit IP addr */ - value = addr[7]; - temp = (nveu32_t)addr[6] << 16; - value |= temp; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3_AD0R(filter_no)); - /* update Bits[63:32] of 128-bit IP addr */ - value = addr[5]; - temp = (nveu32_t)addr[4] << 16; - value |= temp; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3_AD1R(filter_no)); - /* update Bits[95:64] of 128-bit IP addr */ - value = addr[3]; - temp = (nveu32_t)addr[2] << 16; - value |= temp; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3_AD2R(filter_no)); - /* update Bits[127:96] of 128-bit IP addr */ - value = addr[1]; - temp = (nveu32_t)addr[0] << 16; - value |= temp; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3_AD3R(filter_no)); + if (((val & EQOS_CORE_CORRECTABLE_ERR) == EQOS_CORE_CORRECTABLE_ERR) || + ((val & EQOS_CORE_UNCORRECTABLE_ERR) == EQOS_CORE_UNCORRECTABLE_ERR)) { - return 0; + /* Clear FSM error status. Clear on read */ + (void)osi_readla(osi_core, (nveu8_t *)osi_core->base + + EQOS_MAC_DPP_FSM_INTERRUPT_STATUS); + + /* Clear ECC error status register */ + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + EQOS_MTL_ECC_INTERRUPT_STATUS); + if (val != 0U) { + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + EQOS_MTL_ECC_INTERRUPT_STATUS); + } + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + + EQOS_DMA_ECC_INTERRUPT_STATUS); + if (val != 0U) { + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + + EQOS_DMA_ECC_INTERRUPT_STATUS); + } + } } +#endif /** - * @brief eqos_update_l4_port_no -program source port no + * @brief eqos_handle_common_intr - Handles common interrupt. 
* * @note * Algorithm: - * - Validate filter_no for max value and return -1 on failure. - * - Update port_no based on src_dst_port_match to confiure L4 layer filtering. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_019 + * - Reads DMA ISR register + * - Returns if calue is 0. + * - Handle Non-TI/RI interrupts for all MTL queues and + * increments #osi_core_priv_data->stats + * based on error detected per cahnnel. + * - Calls eqos_handle_mac_intrs() to handle MAC interrupts. + * - Refer to EQOS column of <> for API details. + * - TraceID:ETHERNET_NVETHERNETRM_010 * - * @param[in] osi_core: OSI core private data structure. Used param base. - * @param[in] filter_no: filter index. Refer #osi_l3_l4_filter->filter_no for details. - * @param[in] port_no: ipv4 address. Refer #osi_l3_l4_filter->port_no for details. - * @param[in] src_dst_port_match: Refer #osi_l3_l4_filter->src_dst_port_match for details. + * @param[in] osi_core: OSI core private data structure. * - * @pre - * - MAC should be initialized and started. see osi_start_mac() - * - osi_core->osd should be populated. - * - DCS bits should be enabled in RXQ to DMA mapping register + * @pre MAC should be initialized and started. see osi_start_mac() * * @note * API Group: - * - Initialization: Yes + * - Initialization: No * - Run time: Yes * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
*/ -static nve32_t eqos_update_l4_port_no( - struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu16_t port_no, - const nveu32_t src_dst_port_match) +static void eqos_handle_common_intr(struct osi_core_priv_data *const osi_core) { void *base = osi_core->base; - nveu32_t value = 0U; - nveu32_t temp = 0U; + nveu32_t dma_isr = 0; + nveu32_t qinx = 0; + nveu32_t i = 0; + nveu32_t dma_sr = 0; + nveu32_t dma_ier = 0; + nveu32_t mtl_isr = 0; + nveu32_t frp_isr = 0U; - if (filter_no > (EQOS_MAX_L3_L4_FILTER - 0x1U)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (nveul64_t)filter_no); - return -1; + if (osi_core->mac_ver >= OSI_EQOS_MAC_5_30) { + osi_writela(osi_core, EQOS_MAC_SBD_INTR, (nveu8_t *)osi_core->base + + EQOS_WRAP_COMMON_INTR_STATUS); +#ifdef HSI_SUPPORT + if (osi_core->hsi.enabled == OSI_ENABLE) { + eqos_handle_hsi_intr(osi_core); + } +#endif } - value = osi_readla(osi_core, - (nveu8_t *)base + EQOS_MAC_L4_ADR(filter_no)); - if (src_dst_port_match == OSI_SOURCE_MATCH) { - value &= ~EQOS_MAC_L4_SP_MASK; - value |= ((nveu32_t)port_no & EQOS_MAC_L4_SP_MASK); + dma_isr = osi_readla(osi_core, (nveu8_t *)base + EQOS_DMA_ISR); + if (dma_isr != 0U) { + //FIXME Need to check how we can get the DMA channel here instead of + //MTL Queues + if ((dma_isr & EQOS_DMA_CHAN_INTR_STATUS) != 0U) { + /* Handle Non-TI/RI interrupts */ + for (i = 0; i < osi_core->num_mtl_queues; i++) { + qinx = osi_core->mtl_queues[i]; + if (qinx >= OSI_EQOS_MAX_NUM_CHANS) { + continue; + } + + /* read dma channel status register */ + dma_sr = osi_readla(osi_core, (nveu8_t *)base + + EQOS_DMA_CHX_STATUS(qinx)); + /* read dma channel interrupt enable register */ + dma_ier = osi_readla(osi_core, (nveu8_t *)base + + EQOS_DMA_CHX_IER(qinx)); + + /* process only those interrupts which we + * have enabled. 
+ */ + dma_sr = (dma_sr & dma_ier); + + /* mask off RI and TI */ + dma_sr &= ~(OSI_BIT(6) | OSI_BIT(0)); + if (dma_sr == 0U) { + continue; + } + + /* ack non ti/ri ints */ + osi_writela(osi_core, dma_sr, (nveu8_t *)base + + EQOS_DMA_CHX_STATUS(qinx)); +#ifndef OSI_STRIPPED_LIB + update_dma_sr_stats(osi_core, dma_sr, qinx); +#endif /* !OSI_STRIPPED_LIB */ + } + } + + eqos_handle_mac_intrs(osi_core, dma_isr); + + /* Handle MTL inerrupts */ + mtl_isr = osi_readla(osi_core, (nveu8_t *)base + EQOS_MTL_INTR_STATUS); + if (((mtl_isr & EQOS_MTL_IS_ESTIS) == EQOS_MTL_IS_ESTIS) && + ((dma_isr & EQOS_DMA_ISR_MTLIS) == EQOS_DMA_ISR_MTLIS)) { + eqos_handle_mtl_intrs(osi_core); + mtl_isr &= ~EQOS_MTL_IS_ESTIS; + osi_writela(osi_core, mtl_isr, (nveu8_t *)base + EQOS_MTL_INTR_STATUS); + } + + /* Clear FRP Interrupt MTL_RXP_Interrupt_Control_Status */ + frp_isr = osi_readla(osi_core, (nveu8_t *)base + EQOS_MTL_RXP_INTR_CS); + frp_isr |= (EQOS_MTL_RXP_INTR_CS_NVEOVIS | EQOS_MTL_RXP_INTR_CS_NPEOVIS | + EQOS_MTL_RXP_INTR_CS_FOOVIS | EQOS_MTL_RXP_INTR_CS_PDRFIS); + osi_writela(osi_core, frp_isr, (nveu8_t *)base + EQOS_MTL_RXP_INTR_CS); } else { - value &= ~EQOS_MAC_L4_DP_MASK; - temp = port_no; - value |= ((temp << EQOS_MAC_L4_DP_SHIFT) & EQOS_MAC_L4_DP_MASK); + /* Do Nothing */ } - osi_writela(osi_core, value, - (nveu8_t *)base + EQOS_MAC_L4_ADR(filter_no)); - - return 0; } -/** \cond DO_NOT_DOCUMENT */ +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) /** - * @brief eqos_set_dcs - check and update dma routing register + * @brief eqos_config_mac_tx - Enable/Disable MAC Tx * * @note * Algorithm: - * - Check for request for DCS_enable as well as validate chan - * number and dcs_enable is set. After validation, this sequence is used - * to configure L3((IPv4/IPv6) filters for address matching. + * - Enable or Disables MAC Transmitter * * @param[in] osi_core: OSI core private data structure. 
- * @param[in] value: nveu32_t value for caller - * @param[in] dma_routing_enable: filter based dma routing enable(1) - * @param[in] dma_chan: dma channel for routing based on filter - * - * @pre - * - MAC should be initialized and started. see osi_start_mac() - * - DCS bit of RxQ should be enabled for dynamic channel selection - * in filter support - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - * - *@return updated nveu32_t value - */ -static inline nveu32_t eqos_set_dcs( - struct osi_core_priv_data *const osi_core, - nveu32_t value, - nveu32_t dma_routing_enable, - nveu32_t dma_chan) -{ - nveu32_t t_val = value; - - if ((dma_routing_enable == OSI_ENABLE) && (dma_chan < - OSI_EQOS_MAX_NUM_CHANS) && (osi_core->dcs_en == - OSI_ENABLE)) { - t_val |= ((dma_routing_enable << - EQOS_MAC_L3L4_CTR_DMCHEN0_SHIFT) & - EQOS_MAC_L3L4_CTR_DMCHEN0); - t_val |= ((dma_chan << - EQOS_MAC_L3L4_CTR_DMCHN0_SHIFT) & - EQOS_MAC_L3L4_CTR_DMCHN0); - } - - return t_val; -} - -/** - * @brief eqos_helper_l3l4_bitmask - helper function to set L3L4 - * bitmask. - * - * @note - * Algorithm: - * - set bit corresponding to L3l4 filter index + * @param[in] enable: Enable or Disable.MAC Tx * - * @param[out] bitmask: bit mask OSI core private data structure. - * @param[in] filter_no: filter index - * @param[in] value: 0 - disable otherwise - l3/l4 filter enabled + * @pre MAC init should be complete. See osi_hw_core_init() * * @note * API Group: - * - Initialization: Yes + * - Initialization: No * - Run time: Yes * - De-initialization: No - * - * @pre MAC should be initialized and started. 
see osi_start_mac() - * */ -static inline void eqos_helper_l3l4_bitmask(nveu32_t *bitmask, - nveu32_t filter_no, - nveu32_t value) +static void eqos_config_mac_tx(struct osi_core_priv_data *const osi_core, + const nveu32_t enable) { - nveu32_t temp; + nveu32_t value; + void *addr = osi_core->base; - /* Set bit mask for index */ - temp = OSI_ENABLE; - temp = temp << filter_no; - /* check against all bit fields for L3L4 filter enable */ - if ((value & EQOS_MAC_L3L4_CTRL_ALL) != OSI_DISABLE) { - *bitmask |= temp; + if (enable == OSI_ENABLE) { + value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_MCR); + /* Enable MAC Transmit */ + value |= EQOS_MCR_TE; + osi_writela(osi_core, value, (nveu8_t *)addr + EQOS_MAC_MCR); } else { - *bitmask &= ~temp; + value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_MCR); + /* Disable MAC Transmit */ + value &= ~EQOS_MCR_TE; + osi_writela(osi_core, value, (nveu8_t *)addr + EQOS_MAC_MCR); } } -/** \endcond */ +#endif /* MACSEC_SUPPORT */ /** - * @brief eqos_config_l3_filters - config L3 filters. + * @brief eqos_update_mac_addr_helper - Function to update DCS and MBC; helper function for + * eqos_update_mac_addr_low_high_reg() * * @note * Algorithm: - * - Validate filter_no for maximum and hannel number if dma_routing_enable - * is OSI_ENABLE and reitrn -1 if fails. - * - Configure L3 filter register based on all arguments(except for osi_core and dma_routing_enable) - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_019 - * - * @param[in, out] osi_core: OSI core private data structure. Used param is base. - * @param[in] filter_no: filter index. Max EQOS_MAX_L3_L4_FILTER - 1. - * @param[in] enb_dis: OSI_ENABLE - enable otherwise - disable L3 filter. - * @param[in] ipv4_ipv6_match: OSI_IPV6_MATCH - IPv6, otherwise - IPv4. - * @param[in] src_dst_addr_match: OSI_SOURCE_MATCH - source, otherwise - destination. - * @param[in] perfect_inverse_match: normal match(0) or inverse map(1). 
- * @param[in] dma_routing_enable: Valid value OSI_ENABLE, invalid otherwise. - * @param[in] dma_chan: dma channel for routing based on filter. Max OSI_EQOS_MAX_NUM_CHANS-1. + * - Validation of dma_chan if dma_routing_enable is OSI_ENABLE and addr_mask + * - corresponding sections not updated if invalid. + * - This helper routine is to update value parameter based on DCS and MBC + * sections of L2 register. + * dsc_en status performed before updating DCS bits. + * - Refer to EQOS column of <> for API details. + * - TraceID:ETHERNET_NVETHERNETRM_018 + * + * @param[in] osi_core: OSI core private data structure. Used param base. + * @param[out] value: nveu32_t pointer which has value read from register. + * @param[in] idx: Refer #osi_filter->index for details. + * @param[in] dma_chan: Refer #osi_filter->dma_chan for details. + * @param[in] addr_mask: Refer #osi_filter->addr_mask for details. + * @param[in] src_dest: source/destination MAC address. * * @pre * - MAC should be initialized and started. see osi_start_mac() * - osi_core->osd should be populated. - * - DCS bit of RxQ should be enabled for dynamic channel selection - * in filter support * * @note * API Group: @@ -3941,176 +2005,68 @@ static inline void eqos_helper_l3l4_bitmask(nveu32_t *bitmask, * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t eqos_config_l3_filters( - struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu32_t enb_dis, - const nveu32_t ipv4_ipv6_match, - const nveu32_t src_dst_addr_match, - const nveu32_t perfect_inverse_match, - const nveu32_t dma_routing_enable, - const nveu32_t dma_chan) +static inline nve32_t eqos_update_mac_addr_helper( + const struct osi_core_priv_data *osi_core, + nveu32_t *value, + const nveu32_t idx, + const nveu32_t dma_chan, + const nveu32_t addr_mask, + OSI_UNUSED const nveu32_t src_dest) { - nveu32_t value = 0U; - void *base = osi_core->base; - - if (filter_no > (EQOS_MAX_L3_L4_FILTER - 0x1U)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (nveul64_t)filter_no); - return -1; - } + nveu32_t temp; + nve32_t ret = 0; - if ((dma_routing_enable == OSI_ENABLE) && - (dma_chan > (OSI_EQOS_MAX_NUM_CHANS - 1U))) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "Wrong DMA channel\n", (nveul64_t)dma_chan); - return -1; + /* PDC bit of MAC_Ext_Configuration register is set so binary + * value representation form index 32-127 else hot-bit + * representation. 
+ */ + if ((idx < EQOS_MAX_MAC_ADDR_REG) && + (osi_core->mac_ver >= OSI_EQOS_MAC_5_00)) { + *value &= EQOS_MAC_ADDRH_DCS; + temp = OSI_BIT(dma_chan); + temp = temp << EQOS_MAC_ADDRH_DCS_SHIFT; + temp = temp & EQOS_MAC_ADDRH_DCS; + *value = *value | temp; + } else { + *value = OSI_DISABLE; + temp = dma_chan; + temp = temp << EQOS_MAC_ADDRH_DCS_SHIFT; + temp = temp & EQOS_MAC_ADDRH_DCS; + *value = temp; } - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3L4_CTR_L3PEN0; - value |= (ipv4_ipv6_match & EQOS_MAC_L3L4_CTR_L3PEN0); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - - /* For IPv6 either SA/DA can be checked not both */ - if (ipv4_ipv6_match == OSI_IPV6_MATCH) { - if (enb_dis == OSI_ENABLE) { - if (src_dst_addr_match == OSI_SOURCE_MATCH) { - /* Enable L3 filters for IPv6 SOURCE addr - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3_IP6_CTRL_CLEAR; - value |= ((EQOS_MAC_L3L4_CTR_L3SAM0 | - (perfect_inverse_match << - EQOS_MAC_L3L4_CTR_L3SAI_SHIFT)) & - ((EQOS_MAC_L3L4_CTR_L3SAM0 | - EQOS_MAC_L3L4_CTR_L3SAIM0))); - value |= eqos_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - - } else { - /* Enable L3 filters for IPv6 DESTINATION addr - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3_IP6_CTRL_CLEAR; - value |= ((EQOS_MAC_L3L4_CTR_L3DAM0 | - (perfect_inverse_match << - EQOS_MAC_L3L4_CTR_L3DAI_SHIFT)) & - ((EQOS_MAC_L3L4_CTR_L3DAM0 | - EQOS_MAC_L3L4_CTR_L3DAIM0))); - value |= eqos_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } - } else { - /* Disable L3 filters for IPv6 SOURCE/DESTINATION addr - * matching - */ - value = 
osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~(EQOS_MAC_L3_IP6_CTRL_CLEAR | - EQOS_MAC_L3L4_CTR_L3PEN0); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } - } else { - if (src_dst_addr_match == OSI_SOURCE_MATCH) { - if (enb_dis == OSI_ENABLE) { - /* Enable L3 filters for IPv4 SOURCE addr - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3_IP4_SA_CTRL_CLEAR; - value |= ((EQOS_MAC_L3L4_CTR_L3SAM0 | - (perfect_inverse_match << - EQOS_MAC_L3L4_CTR_L3SAI_SHIFT)) & - ((EQOS_MAC_L3L4_CTR_L3SAM0 | - EQOS_MAC_L3L4_CTR_L3SAIM0))); - value |= eqos_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } else { - /* Disable L3 filters for IPv4 SOURCE addr - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3_IP4_SA_CTRL_CLEAR; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } + /* Address mask is valid for address 1 to 31 index only */ + if ((addr_mask <= EQOS_MAX_MASK_BYTE) && + (addr_mask > OSI_AMASK_DISABLE)) { + if ((idx > 0U) && (idx < EQOS_MAX_MAC_ADDR_REG)) { + *value = (*value | + ((addr_mask << EQOS_MAC_ADDRH_MBC_SHIFT) & + EQOS_MAC_ADDRH_MBC)); } else { - if (enb_dis == OSI_ENABLE) { - /* Enable L3 filters for IPv4 DESTINATION addr - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3_IP4_DA_CTRL_CLEAR; - value |= ((EQOS_MAC_L3L4_CTR_L3DAM0 | - (perfect_inverse_match << - EQOS_MAC_L3L4_CTR_L3DAI_SHIFT)) & - ((EQOS_MAC_L3L4_CTR_L3DAM0 | - EQOS_MAC_L3L4_CTR_L3DAIM0))); - value |= eqos_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } else { - /* Disable L3 filters for 
IPv4 DESTINATION addr - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3_IP4_DA_CTRL_CLEAR; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - } + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "invalid address index for MBC\n", + 0ULL); + ret = -1; } } - /* Set bit corresponding to filter index if value is non-zero */ - eqos_helper_l3l4_bitmask(&osi_core->l3l4_filter_bitmask, - filter_no, value); - - return 0; + return ret; } /** - * @brief eqos_config_l4_filters - Config L4 filters. + * @brief eqos_l2_filter_delete - Function to delete L2 filter * * @note * Algorithm: - * - Validate filter_no for maximum and hannel number if dma_routing_enable - * is OSI_ENABLE and reitrn -1 if fails. - * - Configure L4 filter register based on all arguments(except for osi_core and dma_routing_enable) - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_019 - * - * @param[in, out] osi_core: OSI core private data structure. Used param is base. - * @param[in] filter_no: filter index. Max EQOS_MAX_L3_L4_FILTER - 1. - * @param[in] enb_dis: OSI_ENABLE - enable, otherwise - disable L4 filter - * @param[in] tcp_udp_match: 1 - udp, 0 - tcp - * @param[in] src_dst_port_match: OSI_SOURCE_MATCH - source port, otherwise - dest port - * @param[in] perfect_inverse_match: normal match(0) or inverse map(1) - * @param[in] dma_routing_enable: Valid value OSI_ENABLE, invalid otherwise. - * @param[in] dma_chan: dma channel for routing based on filter. Max OSI_EQOS_MAX_NUM_CHANS-1. + * - This helper routine is to delete L2 filter based on DCS and MBC + * parameter. + * - Handling for EQOS mac version 4.10 differently. + * + * @param[in] osi_core: OSI core private data structure. + * @param[out] value: nveu32_t pointer which has value read from register. 
+ * @param[in] filter_idx: filter index + * @param[in] dma_routing_enable: dma channel routing enable(1) + * @param[in] dma_chan: dma channel number * * @pre * - MAC should be initialized and started. see osi_start_mac() @@ -4121,331 +2077,315 @@ static nve32_t eqos_config_l3_filters( * - Initialization: Yes * - Run time: Yes * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. */ -static nve32_t eqos_config_l4_filters( - struct osi_core_priv_data *const osi_core, - const nveu32_t filter_no, - const nveu32_t enb_dis, - const nveu32_t tcp_udp_match, - const nveu32_t src_dst_port_match, - const nveu32_t perfect_inverse_match, +static void eqos_l2_filter_delete(struct osi_core_priv_data *osi_core, + nveu32_t *value, + const nveu32_t filter_idx, const nveu32_t dma_routing_enable, const nveu32_t dma_chan) { - void *base = osi_core->base; - nveu32_t value = 0U; + nveu32_t dcs_check = *value; + nveu32_t temp = OSI_DISABLE; + nveu32_t idx = (filter_idx & 0xFFU); - if (filter_no > (EQOS_MAX_L3_L4_FILTER - 0x1U)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (nveul64_t)filter_no); - return -1; - } + osi_writela(osi_core, OSI_MAX_32BITS, + (nveu8_t *)osi_core->base + EQOS_MAC_ADDRL((idx))); - if ((dma_routing_enable == OSI_ENABLE) && - (dma_chan > (OSI_EQOS_MAX_NUM_CHANS - 1U))) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "Wrong DMA channel\n", (nveu32_t)dma_chan); - return -1; - } + *value |= OSI_MASK_16BITS; + if ((dma_routing_enable == OSI_DISABLE) || + (osi_core->mac_ver < OSI_EQOS_MAC_5_00)) { + *value &= ~(EQOS_MAC_ADDRH_AE | EQOS_MAC_ADDRH_DCS); + osi_writela(osi_core, *value, (nveu8_t *)osi_core->base + + EQOS_MAC_ADDRH((idx))); + } else { + + dcs_check &= EQOS_MAC_ADDRH_DCS; + dcs_check = dcs_check >> EQOS_MAC_ADDRH_DCS_SHIFT; - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L3L4_CTR_L4PEN0; - value |= 
((tcp_udp_match << EQOS_MAC_L3L4_CTR_L4PEN0_SHIFT) - & EQOS_MAC_L3L4_CTR_L4PEN0); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - - if (src_dst_port_match == OSI_SOURCE_MATCH) { - if (enb_dis == OSI_ENABLE) { - /* Enable L4 filters for SOURCE Port No matching */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L4_SP_CTRL_CLEAR; - value |= ((EQOS_MAC_L3L4_CTR_L4SPM0 | - (perfect_inverse_match << - EQOS_MAC_L3L4_CTR_L4SPI_SHIFT)) & - (EQOS_MAC_L3L4_CTR_L4SPM0 | - EQOS_MAC_L3L4_CTR_L4SPIM0)); - value |= eqos_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); + if (idx >= EQOS_MAX_MAC_ADDR_REG) { + dcs_check = OSI_DISABLE; } else { - /* Disable L4 filters for SOURCE Port No matching */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L4_SP_CTRL_CLEAR; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); + temp = OSI_BIT(dma_chan); + dcs_check &= ~(temp); } - } else { - if (enb_dis == OSI_ENABLE) { - /* Enable L4 filters for DESTINATION port No - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L4_DP_CTRL_CLEAR; - value |= ((EQOS_MAC_L3L4_CTR_L4DPM0 | - (perfect_inverse_match << - EQOS_MAC_L3L4_CTR_L4DPI_SHIFT)) & - (EQOS_MAC_L3L4_CTR_L4DPM0 | - EQOS_MAC_L3L4_CTR_L4DPIM0)); - value |= eqos_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); + + if (dcs_check == OSI_DISABLE) { + *value &= ~(EQOS_MAC_ADDRH_AE | EQOS_MAC_ADDRH_DCS); + osi_writela(osi_core, *value, (nveu8_t *)osi_core->base + + EQOS_MAC_ADDRH((idx))); } else { - /* Disable L4 filters for DESTINATION port No - * matching - */ - value = osi_readla(osi_core, (nveu8_t *)base + - 
EQOS_MAC_L3L4_CTR(filter_no)); - value &= ~EQOS_MAC_L4_DP_CTRL_CLEAR; - osi_writela(osi_core, value, (nveu8_t *)base + - EQOS_MAC_L3L4_CTR(filter_no)); + *value &= ~(EQOS_MAC_ADDRH_DCS); + *value |= (dcs_check << EQOS_MAC_ADDRH_DCS_SHIFT); + osi_writela(osi_core, *value, (nveu8_t *)osi_core->base + + EQOS_MAC_ADDRH((idx))); } } - /* Set bit corresponding to filter index if value is non-zero */ - eqos_helper_l3l4_bitmask(&osi_core->l3l4_filter_bitmask, - filter_no, value); - return 0; + return; } /** - * @brief eqos_poll_for_tsinit_complete - Poll for time stamp init complete + * @brief eqos_update_mac_addr_low_high_reg- Update L2 address in filter + * register * * @note * Algorithm: - * - Read TSINIT value from MAC TCR register until it is equal to zero. - * - Max loop count of 1000 with 1 ms delay between iterations. - * - SWUD_ID: ETHERNET_NVETHERNETRM_005_1 + * - This routine validates index and addr of #osi_filter. + * - calls eqos_update_mac_addr_helper() to update DCS and MBS. + * dsc_en status performed before updating DCS bits. + * - Update MAC address to L2 filter register. + * - Refer to EQOS column of <> for API details. + * - TraceID:ETHERNET_NVETHERNETRM_018 * - * @param[in] osi_core: OSI core private data structure. Used param is base, osd_ops.udelay. - * @param[in, out] mac_tcr: Address to store time stamp control register read - * value + * @param[in] osi_core: OSI core private data structure. + * @param[in] filter: OSI filter structure. * - * @pre MAC should be initialized and started. see osi_start_mac() + * @pre + * - MAC should be initialized and started. see osi_start_mac() + * - osi_core->osd should be populated. * * @note * API Group: - * - Initialization: No + * - Initialization: Yes * - Run time: Yes * - De-initialization: No * * @retval 0 on success * @retval -1 on failure. 
*/ -static inline nve32_t eqos_poll_for_tsinit_complete( +static nve32_t eqos_update_mac_addr_low_high_reg( struct osi_core_priv_data *const osi_core, - nveu32_t *mac_tcr) + const struct osi_filter *filter) { - nveu32_t retry = RETRY_COUNT; - nveu32_t count; - nve32_t cond = COND_NOT_MET; + const struct core_local *l_core = (struct core_local *)(void *)osi_core; + nveu32_t idx = filter->index; + nveu32_t dma_routing_enable = filter->dma_routing; + nveu32_t dma_chan = filter->dma_chan; + nveu32_t addr_mask = filter->addr_mask; + nveu32_t src_dest = filter->src_dest; + nveu32_t value = OSI_DISABLE; + nve32_t ret = 0; + const nveu32_t eqos_max_madd[2] = {EQOS_MAX_MAC_ADDRESS_FILTER, + EQOS_MAX_MAC_5_3_ADDRESS_FILTER}; - /* Wait for previous(if any) Initialize Timestamp value - * update to complete - */ - count = 0; - while (cond == COND_NOT_MET) { - if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "poll_for_tsinit: timeout\n", 0ULL); - return -1; + if ((idx >= eqos_max_madd[l_core->l_mac_ver]) || + (dma_chan >= OSI_EQOS_MAX_NUM_CHANS)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "invalid MAC filter index or channel number\n", + 0ULL); + ret = -1; + goto fail; + } + + /* read current value at index preserve DCS current value */ + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + + EQOS_MAC_ADDRH((idx))); + + /* High address reset DCS and AE bits*/ + if ((filter->oper_mode & OSI_OPER_ADDR_DEL) != OSI_NONE) { + eqos_l2_filter_delete(osi_core, &value, idx, dma_routing_enable, + dma_chan); + } else { + ret = eqos_update_mac_addr_helper(osi_core, &value, idx, dma_chan, + addr_mask, src_dest); + /* Check return value from helper code */ + if (ret == -1) { + goto fail; } - /* Read and Check TSINIT in MAC_Timestamp_Control register */ - *mac_tcr = osi_readla(osi_core, (nveu8_t *)osi_core->base + - EQOS_MAC_TCR); - if ((*mac_tcr & EQOS_MAC_TCR_TSINIT) == 0U) { - cond = COND_MET; + + /* Update AE bit if OSI_OPER_ADDR_UPDATE is set */ + if 
((filter->oper_mode & OSI_OPER_ADDR_UPDATE) == OSI_OPER_ADDR_UPDATE) { + value |= EQOS_MAC_ADDRH_AE; } - count++; - osi_core->osd_ops.udelay(OSI_DELAY_1000US); - } + /* Setting Source/Destination Address match valid for 1 to 32 index */ + if (((idx > 0U) && (idx < EQOS_MAX_MAC_ADDR_REG)) && (src_dest <= OSI_SA_MATCH)) { + value = (value | ((src_dest << EQOS_MAC_ADDRH_SA_SHIFT) & + EQOS_MAC_ADDRH_SA)); + } - return 0; + osi_writela(osi_core, ((nveu32_t)filter->mac_address[4] | + ((nveu32_t)filter->mac_address[5] << 8) | value), + (nveu8_t *)osi_core->base + EQOS_MAC_ADDRH((idx))); + + osi_writela(osi_core, ((nveu32_t)filter->mac_address[0] | + ((nveu32_t)filter->mac_address[1] << 8) | + ((nveu32_t)filter->mac_address[2] << 16) | + ((nveu32_t)filter->mac_address[3] << 24)), + (nveu8_t *)osi_core->base + EQOS_MAC_ADDRL((idx))); + } +fail: + return ret; } +#ifndef OSI_STRIPPED_LIB /** - * @brief eqos_set_systime_to_mac - Set system time - * - * @note - * Algorithm: - * - Updates system time (seconds and nano seconds) in hardware registers. - * - Calls eqos_poll_for_tsinit_complete() before and after setting time. - * - return -1 if API fails. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_005 + * @brief eqos_config_ptp_offload - Enable/Disable PTP offload * - * @param[in] osi_core: OSI core private data structure. Used param is base. - * @param[in] sec: Seconds to be configured - * @param[in] nsec: Nano Seconds to be configured + * Algorithm: Based on input argument, update PTO and TSCR registers. + * Update ptp_filter for TSCR register. * - * @pre MAC should be initialized and started. see osi_start_mac() + * @param[in] osi_core: OSI core private data structure. + * @param[in] pto_config: The PTP Offload configuration from function + * driver. * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No + * @note 1) MAC should be init and started. 
see osi_start_mac() + * 2) configure_ptp() should be called after this API * * @retval 0 on success * @retval -1 on failure. */ -static nve32_t eqos_set_systime_to_mac( - struct osi_core_priv_data *const osi_core, - const nveu32_t sec, - const nveu32_t nsec) +static nve32_t eqos_config_ptp_offload(struct osi_core_priv_data *const osi_core, + struct osi_pto_config *const pto_config) { - void *addr = osi_core->base; - nveu32_t mac_tcr; - nve32_t ret; - - /* To be sure previous write was flushed (if Any) */ - ret = eqos_poll_for_tsinit_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } + nveu8_t *addr = (nveu8_t *)osi_core->base; + nve32_t ret = 0; + nveu32_t value = 0x0U; + nveu32_t ptc_value = 0x0U; + nveu32_t port_id = 0x0U; - /* write seconds value to MAC_System_Time_Seconds_Update register */ - osi_writela(osi_core, sec, (nveu8_t *)addr + EQOS_MAC_STSUR); + /* Read MAC TCR */ + value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_TCR); + /* clear old configuration */ + value &= ~(EQOS_MAC_TCR_TSENMACADDR | OSI_MAC_TCR_SNAPTYPSEL_3 | + OSI_MAC_TCR_TSMASTERENA | OSI_MAC_TCR_TSEVENTENA | + OSI_MAC_TCR_TSENA | OSI_MAC_TCR_TSCFUPDT | + OSI_MAC_TCR_TSCTRLSSR | OSI_MAC_TCR_TSVER2ENA | + OSI_MAC_TCR_TSIPENA); - /* write nano seconds value to MAC_System_Time_Nanoseconds_Update - * register - */ - osi_writela(osi_core, nsec, (nveu8_t *)addr + EQOS_MAC_STNSUR); + /** Handle PTO disable */ + if (pto_config->en_dis == OSI_DISABLE) { + osi_core->ptp_config.ptp_filter = value; + osi_writela(osi_core, ptc_value, addr + EQOS_MAC_PTO_CR); + osi_writela(osi_core, value, addr + EQOS_MAC_TCR); + osi_writela(osi_core, OSI_NONE, addr + EQOS_MAC_PIDR0); + osi_writela(osi_core, OSI_NONE, addr + EQOS_MAC_PIDR1); + osi_writela(osi_core, OSI_NONE, addr + EQOS_MAC_PIDR2); + return 0; + } - /* issue command to update the configured secs and nsecs values */ - mac_tcr |= EQOS_MAC_TCR_TSINIT; - eqos_core_safety_writel(osi_core, mac_tcr, - (nveu8_t *)addr + EQOS_MAC_TCR, - 
EQOS_MAC_TCR_IDX); + /** Handle PTO enable */ + /* Set PTOEN bit */ + ptc_value |= EQOS_MAC_PTO_CR_PTOEN; + ptc_value |= ((pto_config->domain_num << EQOS_MAC_PTO_CR_DN_SHIFT) + & EQOS_MAC_PTO_CR_DN); + /* Set TSCR register flag */ + value |= (OSI_MAC_TCR_TSENA | OSI_MAC_TCR_TSCFUPDT | + OSI_MAC_TCR_TSCTRLSSR | OSI_MAC_TCR_TSVER2ENA | + OSI_MAC_TCR_TSIPENA); - ret = eqos_poll_for_tsinit_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; + if (pto_config->snap_type > 0U) { + /* Set APDREQEN bit if snap_type > 0 */ + ptc_value |= EQOS_MAC_PTO_CR_APDREQEN; } - return 0; -} - -/** - * @brief eqos_poll_for_addend_complete - Poll for addend value write complete - * - * @note - * Algorithm: - * - Read TSADDREG value from MAC TCR register until it is equal to zero. - * - Max loop count of 1000 with 1 ms delay between iterations. - * - SWUD_ID: ETHERNET_NVETHERNETRM_023_1 - * - * @param[in] osi_core: OSI core private data structure. Used param is base, osd_ops.udelay. - * @param[in, out] mac_tcr: Address to store time stamp control register read - * value - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static inline nve32_t eqos_poll_for_addend_complete( - struct osi_core_priv_data *const osi_core, - nveu32_t *mac_tcr) -{ - nveu32_t retry = RETRY_COUNT; - nveu32_t count; - nve32_t cond = COND_NOT_MET; + /* Set SNAPTYPSEL for Taking Snapshots mode */ + value |= ((pto_config->snap_type << EQOS_MAC_TCR_SNAPTYPSEL_SHIFT) & + OSI_MAC_TCR_SNAPTYPSEL_3); - /* Wait for previous(if any) addend value update to complete */ - /* Poll */ - count = 0; - while (cond == COND_NOT_MET) { - if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "poll_for_addend: timeout\n", 0ULL); - return -1; - } - /* Read and Check TSADDREG in MAC_Timestamp_Control register */ - *mac_tcr = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MAC_TCR); - if ((*mac_tcr & EQOS_MAC_TCR_TSADDREG) == 0U) { - cond = COND_MET; + /* Set/Reset TSMSTRENA bit for Master/Slave */ + if (pto_config->master == OSI_ENABLE) { + /* Set TSMSTRENA bit for master */ + value |= OSI_MAC_TCR_TSMASTERENA; + if (pto_config->snap_type != OSI_PTP_SNAP_P2P) { + /* Set ASYNCEN bit on PTO Control Register */ + ptc_value |= EQOS_MAC_PTO_CR_ASYNCEN; } + } else { + /* Reset TSMSTRENA bit for slave */ + value &= ~OSI_MAC_TCR_TSMASTERENA; + } - count++; - osi_core->osd_ops.udelay(OSI_DELAY_1000US); + /* Set/Reset TSENMACADDR bit for UC/MC MAC */ + if (pto_config->mc_uc == OSI_ENABLE) { + /* Set TSENMACADDR bit for MC/UC MAC PTP filter */ + value |= EQOS_MAC_TCR_TSENMACADDR; + } else { + /* Reset TSENMACADDR bit */ + value &= ~EQOS_MAC_TCR_TSENMACADDR; } - return 0; + /* Set TSEVENTENA bit for PTP events */ + value |= OSI_MAC_TCR_TSEVENTENA; + osi_core->ptp_config.ptp_filter = value; + /** Write PTO_CR and TCR registers */ + osi_writela(osi_core, ptc_value, addr + EQOS_MAC_PTO_CR); + osi_writela(osi_core, value, addr + EQOS_MAC_TCR); + /* Port ID for PTP offload packet created */ + port_id = pto_config->portid & EQOS_MAC_PIDR_PID_MASK; + osi_writela(osi_core, port_id, addr + EQOS_MAC_PIDR0); + 
osi_writela(osi_core, OSI_NONE, addr + EQOS_MAC_PIDR1); + osi_writela(osi_core, OSI_NONE, addr + EQOS_MAC_PIDR2); + + return ret; } +#endif /* !OSI_STRIPPED_LIB */ /** - * @brief eqos_config_addend - Configure addend + * @brief eqos_config_l3l4_filters - Config L3L4 filters. * * @note * Algorithm: - * - Updates the Addend value in HW register - * - Calls eqos_poll_for_addend_complete() before and after setting time. - * - return -1 if API fails. - * - Refer to EQOS column of <> for API details. - * - TraceID:ETHERNET_NVETHERNETRM_023 - * - * @param[in] osi_core: OSI core private data structure. Used param is base. - * @param[in] addend: Addend value to be configured + * - This sequence is used to configure L3L4 filters for SA and DA Port Number matching. + * - Prepare register data using prepare_l3l4_registers(). + * - Write l3l4 reigsters using mgbe_l3l4_filter_write(). + * - Return 0 on success. + * - Return -1 on any register failure. * - * @pre MAC should be initialized and started. see osi_start_mac() + * @param[in] osi_core: OSI core private data structure. + * @param[in] filter_no_r: filter index + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No + * @note 1) MAC should be init and started. see osi_start_mac() + * 2) osi_core->osd should be populated * * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t eqos_config_addend(struct osi_core_priv_data *const osi_core, - const nveu32_t addend) +static nve32_t eqos_config_l3l4_filters(struct osi_core_priv_data *const osi_core, + nveu32_t filter_no_r, + const struct osi_l3_l4_filter *const l3_l4) { - nveu32_t mac_tcr; - nve32_t ret; + void *base = osi_core->base; +#ifndef OSI_STRIPPED_LIB + nveu32_t l3_addr0_reg = 0; + nveu32_t l3_addr2_reg = 0; + nveu32_t l3_addr3_reg = 0; + nveu32_t l4_addr_reg = 0; +#endif /* !OSI_STRIPPED_LIB */ + nveu32_t l3_addr1_reg = 0; + nveu32_t ctr_reg = 0; + nveu32_t filter_no = filter_no_r & (OSI_MGBE_MAX_L3_L4_FILTER - 1U); - /* To be sure previous write was flushed (if Any) */ - ret = eqos_poll_for_addend_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } + prepare_l3l4_registers(osi_core, l3_l4, +#ifndef OSI_STRIPPED_LIB + &l3_addr0_reg, + &l3_addr2_reg, + &l3_addr3_reg, + &l4_addr_reg, +#endif /* !OSI_STRIPPED_LIB */ + &l3_addr1_reg, + &ctr_reg); - /* write addend value to MAC_Timestamp_Addend register */ - eqos_core_safety_writel(osi_core, addend, - (nveu8_t *)osi_core->base + EQOS_MAC_TAR, - EQOS_MAC_TAR_IDX); +#ifndef OSI_STRIPPED_LIB + /* Update l3 ip addr MGBE_MAC_L3_AD0R register */ + osi_writela(osi_core, l3_addr0_reg, (nveu8_t *)base + EQOS_MAC_L3_AD0R(filter_no)); - /* issue command to update the configured addend value */ - mac_tcr |= EQOS_MAC_TCR_TSADDREG; - eqos_core_safety_writel(osi_core, mac_tcr, - (nveu8_t *)osi_core->base + EQOS_MAC_TCR, - EQOS_MAC_TCR_IDX); + /* Update l3 ip addr MGBE_MAC_L3_AD2R register */ + osi_writela(osi_core, l3_addr2_reg, (nveu8_t *)base + EQOS_MAC_L3_AD2R(filter_no)); - ret = eqos_poll_for_addend_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } + /* Update l3 ip addr MGBE_MAC_L3_AD3R register */ + osi_writela(osi_core, l3_addr3_reg, (nveu8_t *)base + EQOS_MAC_L3_AD3R(filter_no)); + + /* Update l4 port EQOS_MAC_L4_ADR register */ + osi_writela(osi_core, l4_addr_reg, (nveu8_t *)base + 
EQOS_MAC_L4_ADR(filter_no)); +#endif /* !OSI_STRIPPED_LIB */ + + /* Update l3 ip addr MGBE_MAC_L3_AD1R register */ + osi_writela(osi_core, l3_addr1_reg, (nveu8_t *)base + EQOS_MAC_L3_AD1R(filter_no)); + + /* Write CTR register */ + osi_writela(osi_core, ctr_reg, (nveu8_t *)base + EQOS_MAC_L3L4_CTR(filter_no)); return 0; } @@ -4482,14 +2422,16 @@ static inline nve32_t eqos_poll_for_update_ts_complete( nveu32_t retry = RETRY_COUNT; nveu32_t count; nve32_t cond = COND_NOT_MET; + nve32_t ret = 0; /* Wait for previous(if any) time stamp value update to complete */ count = 0; while (cond == COND_NOT_MET) { if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "poll_for_update_ts: timeout\n", 0ULL); - return -1; + ret = -1; + goto fail; } /* Read and Check TSUPDT in MAC_Timestamp_Control register */ *mac_tcr = osi_readla(osi_core, (nveu8_t *)osi_core->base + @@ -4501,8 +2443,8 @@ static inline nve32_t eqos_poll_for_update_ts_complete( count++; osi_core->osd_ops.udelay(OSI_DELAY_1000US); } - - return 0; +fail: + return ret; } @@ -4542,16 +2484,16 @@ static nve32_t eqos_adjust_mactime(struct osi_core_priv_data *const osi_core, const nveu32_t one_nsec_accuracy) { void *addr = osi_core->base; - nveu32_t mac_tcr; + nveu32_t mac_tcr = 0U; nveu32_t value = 0; nveul64_t temp = 0; nveu32_t sec1 = sec; nveu32_t nsec1 = nsec; - nve32_t ret; + nve32_t ret = 0; ret = eqos_poll_for_update_ts_complete(osi_core, &mac_tcr); if (ret == -1) { - return -1; + goto fail; } if (add_sub != 0U) { @@ -4593,593 +2535,107 @@ static nve32_t eqos_adjust_mactime(struct osi_core_priv_data *const osi_core, value |= add_sub << EQOS_MAC_STNSUR_ADDSUB_SHIFT; osi_writela(osi_core, value, (nveu8_t *)addr + EQOS_MAC_STNSUR); - /* issue command to initialize system time with the value - * specified in MAC_STSUR and MAC_STNSUR - */ - mac_tcr |= EQOS_MAC_TCR_TSUPDT; - eqos_core_safety_writel(osi_core, mac_tcr, - (nveu8_t *)addr + EQOS_MAC_TCR, - 
EQOS_MAC_TCR_IDX); - - ret = eqos_poll_for_update_ts_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - - return 0; -} - -/** \cond DO_NOT_DOCUMENT */ -/** - * @brief eqos_config_tscr - Configure Time Stamp Register - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] ptp_filter: PTP rx filter parameters - * - * @pre MAC should be initialized and started. see osi_start_mac() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_config_tscr(struct osi_core_priv_data *const osi_core, - const nveu32_t ptp_filter) -{ - void *addr = osi_core->base; - struct core_local *l_core = (struct core_local *)osi_core; - nveu32_t mac_tcr = 0U, i = 0U, temp = 0U; - nveu32_t value = 0x0U; - - if (ptp_filter != OSI_DISABLE) { - mac_tcr = (OSI_MAC_TCR_TSENA | - OSI_MAC_TCR_TSCFUPDT | - OSI_MAC_TCR_TSCTRLSSR); - - for (i = 0U; i < 32U; i++) { - temp = ptp_filter & OSI_BIT(i); - - switch (temp) { - case OSI_MAC_TCR_SNAPTYPSEL_1: - mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_1; - break; - case OSI_MAC_TCR_SNAPTYPSEL_2: - mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_2; - break; - case OSI_MAC_TCR_TSIPV4ENA: - mac_tcr |= OSI_MAC_TCR_TSIPV4ENA; - break; - case OSI_MAC_TCR_TSIPV6ENA: - mac_tcr |= OSI_MAC_TCR_TSIPV6ENA; - break; - case OSI_MAC_TCR_TSEVENTENA: - mac_tcr |= OSI_MAC_TCR_TSEVENTENA; - break; - case OSI_MAC_TCR_TSMASTERENA: - mac_tcr |= OSI_MAC_TCR_TSMASTERENA; - break; - case OSI_MAC_TCR_TSVER2ENA: - mac_tcr |= OSI_MAC_TCR_TSVER2ENA; - break; - case OSI_MAC_TCR_TSIPENA: - mac_tcr |= OSI_MAC_TCR_TSIPENA; - break; - case OSI_MAC_TCR_AV8021ASMEN: - mac_tcr |= OSI_MAC_TCR_AV8021ASMEN; - break; - case OSI_MAC_TCR_TSENALL: - mac_tcr |= OSI_MAC_TCR_TSENALL; - break; - case OSI_MAC_TCR_CSC: - mac_tcr |= OSI_MAC_TCR_CSC; - break; - default: - /* To avoid MISRA violation */ - mac_tcr |= mac_tcr; - break; - } - } - } else { - /* Disabling the MAC time stamping */ - mac_tcr = OSI_DISABLE; - } - - 
eqos_core_safety_writel(osi_core, mac_tcr, - (nveu8_t *)addr + EQOS_MAC_TCR, - EQOS_MAC_TCR_IDX); - value = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_PPS_CTL); - value &= ~EQOS_MAC_PPS_CTL_PPSCTRL0; - if (l_core->pps_freq == OSI_ENABLE) { - value |= OSI_ENABLE; - } - osi_writela(osi_core, value, (nveu8_t *)addr + EQOS_MAC_PPS_CTL); -} -/** \endcond */ - -/** - * @brief eqos_config_ptp_rxq - To config PTP RX packets queue - * - * Algorithm: This function is used to program the PTP RX packets queue. - * - * @param[in] osi_core: OSI core private data. - * - * @note 1) MAC should be init and started. see osi_start_mac() - * 2) osi_core->osd should be populated - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int eqos_config_ptp_rxq(struct osi_core_priv_data *osi_core, - const unsigned int rxq_idx, - const unsigned int enable) -{ - unsigned char *base = osi_core->base; - unsigned int value = OSI_NONE; - unsigned int i = 0U; - - /* Validate the RX queue index argment */ - if (rxq_idx >= OSI_EQOS_MAX_NUM_QUEUES) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid PTP RX queue index\n", - rxq_idx); - return -1; - } - /* Check MAC version */ - if (osi_core->mac_ver <= OSI_EQOS_MAC_5_00) { - /* MAC 4_10 and 5 doesn't have PTP RX Queue route support */ - return 0; - } - - /* Validate enable argument */ - if (enable != OSI_ENABLE && enable != OSI_DISABLE) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid enable input\n", - enable); - return -1; - } - - /* Validate PTP RX queue enable */ - for (i = 0; i < osi_core->num_mtl_queues; i++) { - if (osi_core->mtl_queues[i] == rxq_idx) { - /* Given PTP RX queue is enabled */ - break; - } - } - - if (i == osi_core->num_mtl_queues) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "PTP RX queue not enabled\n", - rxq_idx); - return -1; - } - - /* Read MAC_RxQ_Ctrl1 */ - value = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_RQC1R); - if (enable == OSI_DISABLE) { - /** Reset OMCBCQ 
bit to disable over-riding the MCBC Queue - * priority for the PTP RX queue. - */ - value &= ~EQOS_MAC_RQC1R_OMCBCQ; - - } else { - /* Program PTPQ with ptp_rxq */ - osi_core->ptp_config.ptp_rx_queue = rxq_idx; - value &= ~EQOS_MAC_RQC1R_PTPQ; - value |= (rxq_idx << EQOS_MAC_RQC1R_PTPQ_SHIFT); - /* Reset TPQC before setting TPQC0 */ - value &= ~EQOS_MAC_RQC1R_TPQC; - /** Set TPQC to 0x1 for VLAN Tagged PTP over - * ethernet packets are routed to Rx Queue specified - * by PTPQ field - **/ - value |= EQOS_MAC_RQC1R_TPQC0; - /** Set OMCBCQ bit to enable over-riding the MCBC Queue - * priority for the PTP RX queue. - */ - value |= EQOS_MAC_RQC1R_OMCBCQ; - } - /* Write MAC_RxQ_Ctrl1 */ - osi_writela(osi_core, value, base + EQOS_MAC_RQC1R); - - return 0; -} - -/** - * @brief eqos_config_ssir - Configure SSIR register - * - * @note - * Algorithm: - * - Calculate SSIR - * - For Coarse method(EQOS_MAC_TCR_TSCFUPDT not set in TCR register), ((1/ptp_clock) * 1000000000). - * - For fine correction use predeined value based on MAC version OSI_PTP_SSINC_16 if MAC version - * less than OSI_EQOS_MAC_4_10 and OSI_PTP_SSINC_4 if otherwise. - * - If EQOS_MAC_TCR_TSCTRLSSR bit not set in TCR register, set accurasy to 0.465ns. - * - i.e new val = val * 1000/465; - * - Program the calculated value to EQOS_MAC_SSIR register - * - Refer to EQOS column of <> for API details. - * - SWUD_ID: ETHERNET_NVETHERNETRM_021_1 - * - * @param[in] osi_core: OSI core private data structure. Used param is base, mac_ver. - * - * @pre MAC should be initialized and started. 
see osi_start_mac() - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_config_ssir(struct osi_core_priv_data *const osi_core, - const unsigned int ptp_clock) -{ - nveul64_t val; - nveu32_t mac_tcr; - void *addr = osi_core->base; - - mac_tcr = osi_readla(osi_core, (nveu8_t *)addr + EQOS_MAC_TCR); - - if ((mac_tcr & EQOS_MAC_TCR_TSCFUPDT) == EQOS_MAC_TCR_TSCFUPDT) { - if (osi_core->mac_ver <= OSI_EQOS_MAC_4_10) { - val = OSI_PTP_SSINC_16; - } else if (osi_core->mac_ver == OSI_EQOS_MAC_5_30) { - val = OSI_PTP_SSINC_6; - } else { - val = OSI_PTP_SSINC_4; - } - } else { - /* convert the PTP required clock frequency to nano second for - * COARSE correction. - * Formula: ((1/ptp_clock) * 1000000000) - */ - val = ((1U * OSI_NSEC_PER_SEC) / ptp_clock); - } - - /* 0.465ns accurecy */ - if ((mac_tcr & EQOS_MAC_TCR_TSCTRLSSR) == 0U) { - if (val < UINT_MAX) { - val = (val * 1000U) / 465U; - } - } - - val |= val << EQOS_MAC_SSIR_SSINC_SHIFT; - /* update Sub-second Increment Value */ - if (val < UINT_MAX) { - eqos_core_safety_writel(osi_core, (nveu32_t)val, - (nveu8_t *)addr + EQOS_MAC_SSIR, - EQOS_MAC_SSIR_IDX); - } -} - -/** - * @brief eqos_core_deinit - EQOS MAC core deinitialization - * - * @note - * Algorithm: - * - This function calls eqos_stop_mac() - * - TraceId:ETHERNET_NVETHERNETRM_007 - * - * @param[in] osi_core: OSI core private data structure. Used param is base. - * - * @pre Required clks and resets has to be enabled - * - * @note - * API Group: - * - Initialization: No - * - Run time: No - * - De-initialization: Yes - */ -static void eqos_core_deinit(struct osi_core_priv_data *const osi_core) -{ - /* Stop the MAC by disabling both MAC Tx and Rx */ - eqos_stop_mac(osi_core); -} - -/** - * @brief eqos_hw_est_write - indirect write the GCL to Software own list - * (SWOL) - * - * @param[in] base: MAC base IOVA address. - * @param[in] addr_val: Address offset for indirect write. 
- * @param[in] data: Data to be written at offset. - * @param[in] gcla: Gate Control List Address, 0 for ETS register. - * 1 for GCL memory. - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int eqos_hw_est_write(struct osi_core_priv_data *osi_core, - unsigned int addr_val, - unsigned int data, unsigned int gcla) -{ - void *base = osi_core->base; - int retry = 1000; - unsigned int val = 0x0; - - osi_writela(osi_core, data, (unsigned char *)base + EQOS_MTL_EST_DATA); - - val &= ~EQOS_MTL_EST_ADDR_MASK; - val |= (gcla == 1U) ? 0x0U : EQOS_MTL_EST_GCRR; - val |= EQOS_MTL_EST_SRWO; - val |= addr_val; - osi_writela(osi_core, val, - (unsigned char *)base + EQOS_MTL_EST_GCL_CONTROL); - - while (--retry > 0) { - osi_core->osd_ops.udelay(OSI_DELAY_1US); - val = osi_readla(osi_core, (unsigned char *)base + - EQOS_MTL_EST_GCL_CONTROL); - if ((val & EQOS_MTL_EST_SRWO) == EQOS_MTL_EST_SRWO) { - continue; - } - - break; - } - - if (((val & EQOS_MTL_EST_ERR0) == EQOS_MTL_EST_ERR0) || - (retry <= 0)) { - return -1; - } - - return 0; -} - -/** - * @brief eqos_hw_config_est - Read Setting for GCL from input and update - * registers. - * - * Algorithm: - * 1) Write TER, LLR and EST control register - * 2) Update GCL to sw own GCL (MTL_EST_Status bit SWOL will tell which is - * owned by SW) and store which GCL is in use currently in sw. - * 3) TODO set DBGB and DBGM for debugging - * 4) EST_data and GCRR to 1, update entry sno in ADDR and put data at - * est_gcl_data enable GCL MTL_EST_SSWL and wait for self clear or use - * SWLC in MTL_EST_Status. Please note new GCL will be pushed for each entry. - * 5) Configure btr. Update btr based on current time (current time - * should be updated based on PTP by this time) - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] est: EST configuration input argument. - * - * @note MAC should be init and started. 
see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int eqos_hw_config_est(struct osi_core_priv_data *osi_core, - struct osi_est_config *est) -{ - void *base = osi_core->base; - unsigned int btr[2] = {0}; - unsigned int val = 0x0; - unsigned int addr = 0x0; - unsigned int i; - int ret = 0; - - if ((osi_core->hw_feature != OSI_NULL) && - (osi_core->hw_feature->est_sel == OSI_DISABLE)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "EST not supported in HW\n", 0ULL); - return -1; - } - - if (est->en_dis == OSI_DISABLE) { - val = osi_readla(osi_core, - (nveu8_t *)base + EQOS_MTL_EST_CONTROL); - val &= ~EQOS_MTL_EST_CONTROL_EEST; - osi_writela(osi_core, val, - (nveu8_t *)base + EQOS_MTL_EST_CONTROL); - return 0; - } - - btr[0] = est->btr[0]; - btr[1] = est->btr[1]; - - if (btr[0] == 0U && btr[1] == 0U) { - common_get_systime_from_mac(osi_core->base, osi_core->mac, - &btr[1], &btr[0]); - } - - if (gcl_validate(osi_core, est, btr, osi_core->mac) < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL validation failed\n", 0LL); - return -1; - } - - ret = eqos_hw_est_write(osi_core, EQOS_MTL_EST_CTR_LOW, est->ctr[0], - OSI_DISABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL CTR[0] failed\n", 0LL); - return ret; - } - /* check for est->ctr[i] not more than FF, TODO as per hw config - * parameter we can have max 0x3 as this value in sec */ - est->ctr[1] &= EQOS_MTL_EST_CTR_HIGH_MAX; - ret = eqos_hw_est_write(osi_core, EQOS_MTL_EST_CTR_HIGH, est->ctr[1], - OSI_DISABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL CTR[1] failed\n", 0LL); - return ret; - } - - ret = eqos_hw_est_write(osi_core, EQOS_MTL_EST_TER, est->ter, - OSI_DISABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL TER failed\n", 0LL); - return ret; - } - - ret = eqos_hw_est_write(osi_core, EQOS_MTL_EST_LLR, est->llr, - OSI_DISABLE); - if (ret < 0) { - 
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL LLR failed\n", 0LL); - return ret; - } - - /* Write GCL table */ - for (i = 0U; i < est->llr; i++) { - addr = i; - addr = addr << EQOS_MTL_EST_ADDR_SHIFT; - addr &= EQOS_MTL_EST_ADDR_MASK; - ret = eqos_hw_est_write(osi_core, addr, est->gcl[i], - OSI_ENABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL enties write failed\n", - (unsigned long long)i); - return ret; - } - } - - /* Write parameters */ - ret = eqos_hw_est_write(osi_core, EQOS_MTL_EST_BTR_LOW, - btr[0] + est->btr_offset[0], OSI_DISABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL BTR[0] failed\n", - (unsigned long long)(btr[0] + - est->btr_offset[0])); - return ret; - } - - ret = eqos_hw_est_write(osi_core, EQOS_MTL_EST_BTR_HIGH, - btr[1] + est->btr_offset[1], OSI_DISABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL BTR[1] failed\n", - (unsigned long long)(btr[1] + - est->btr_offset[1])); - return ret; - } + /* issue command to initialize system time with the value + * specified in MAC_STSUR and MAC_STNSUR + */ + mac_tcr |= EQOS_MAC_TCR_TSUPDT; + osi_writela(osi_core, mac_tcr, (nveu8_t *)addr + EQOS_MAC_TCR); - val = osi_readla(osi_core, (unsigned char *) - base + EQOS_MTL_EST_CONTROL); - /* Store table */ - val |= EQOS_MTL_EST_CONTROL_SSWL; - val |= EQOS_MTL_EST_CONTROL_EEST; - val |= EQOS_MTL_EST_CONTROL_QHLBF; - osi_writela(osi_core, val, (nveu8_t *)base + EQOS_MTL_EST_CONTROL); + ret = eqos_poll_for_update_ts_complete(osi_core, &mac_tcr); +fail: return ret; } +#ifndef OSI_STRIPPED_LIB /** - * @brief eqos_hw_config_fep - Read Setting for preemption and express for TC - * and update registers. + * @brief eqos_config_ptp_rxq - To config PTP RX packets queue * - * Algorithm: - * 1) Check for TC enable and TC has masked for setting to preemptable. 
- * 2) update FPE control status register + * Algorithm: This function is used to program the PTP RX packets queue. * - * @param[in] osi_core: OSI core private data structure. - * @param[in] fpe: FPE configuration input argument. + * @param[in] osi_core: OSI core private data. * - * @note MAC should be init and started. see osi_start_mac() + * @note 1) MAC should be init and started. see osi_start_mac() + * 2) osi_core->osd should be populated * * @retval 0 on success * @retval -1 on failure. */ -static int eqos_hw_config_fpe(struct osi_core_priv_data *osi_core, - struct osi_fpe_config *fpe) +static nve32_t eqos_config_ptp_rxq(struct osi_core_priv_data *const osi_core, + const nveu32_t rxq_idx, + const nveu32_t enable) { - unsigned int i = 0U; - unsigned int val = 0U; - unsigned int temp = 0U, temp1 = 0U; - unsigned int temp_shift = 0U; + nveu8_t *base = osi_core->base; + nveu32_t value = OSI_NONE; + nveu32_t i = 0U; - if ((osi_core->hw_feature != OSI_NULL) && - (osi_core->hw_feature->fpe_sel == OSI_DISABLE)) { + /* Validate the RX queue index argment */ + if (rxq_idx >= OSI_EQOS_MAX_NUM_QUEUES) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "FPE not supported in HW\n", 0ULL); + "Invalid PTP RX queue index\n", + rxq_idx); return -1; } - - osi_core->fpe_ready = OSI_DISABLE; - - - if (((fpe->tx_queue_preemption_enable << EQOS_MTL_FPE_CTS_PEC_SHIFT) & - EQOS_MTL_FPE_CTS_PEC) == OSI_DISABLE) { - val = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MTL_FPE_CTS); - val &= ~EQOS_MTL_FPE_CTS_PEC; - osi_writela(osi_core, val, - (nveu8_t *)osi_core->base + EQOS_MTL_FPE_CTS); - - val = osi_readla(osi_core, - (nveu8_t *)osi_core->base + EQOS_MAC_FPE_CTS); - val &= ~EQOS_MAC_FPE_CTS_EFPE; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - EQOS_MAC_FPE_CTS); - + /* Check MAC version */ + if (osi_core->mac_ver <= OSI_EQOS_MAC_5_00) { + /* MAC 4_10 and 5 doesn't have PTP RX Queue route support */ return 0; } - val = osi_readla(osi_core, - (nveu8_t 
*)osi_core->base + EQOS_MTL_FPE_CTS); - val &= ~EQOS_MTL_FPE_CTS_PEC; - for (i = 0U; i < OSI_MAX_TC_NUM; i++) { - /* max 8 bit for this structure fot TC/TXQ. Set the TC for express or - * preemption. Default is express for a TC. DWCXG_NUM_TC = 8 */ - temp = OSI_BIT(i); - if ((fpe->tx_queue_preemption_enable & temp) == temp) { - temp_shift = i; - temp_shift += EQOS_MTL_FPE_CTS_PEC_SHIFT; - /* set queue for preemtable */ - if (temp_shift < EQOS_MTL_FPE_CTS_PEC_MAX_SHIFT) { - temp1 = OSI_ENABLE; - temp1 = temp1 << temp_shift; - val |= temp1; - } else { - /* Do nothing */ - } + /* Validate enable argument */ + if ((enable != OSI_ENABLE) && (enable != OSI_DISABLE)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid enable input\n", + enable); + return -1; + } + + /* Validate PTP RX queue enable */ + for (i = 0; i < osi_core->num_mtl_queues; i++) { + if (osi_core->mtl_queues[i] == rxq_idx) { + /* Given PTP RX queue is enabled */ + break; } } - osi_writela(osi_core, val, - (nveu8_t *)osi_core->base + EQOS_MTL_FPE_CTS); - /* Setting RQ as RxQ 0 is not allowed */ - if (fpe->rq == 0x0U || fpe->rq >= OSI_EQOS_MAX_NUM_CHANS) { + if (i == osi_core->num_mtl_queues) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "EST init failed due to wrong RQ\n", fpe->rq); + "PTP RX queue not enabled\n", + rxq_idx); return -1; } - val = osi_readla(osi_core, - (unsigned char *)osi_core->base + EQOS_MAC_RQC1R); - val &= ~EQOS_MAC_RQC1R_FPRQ; - temp = fpe->rq; - temp = temp << EQOS_MAC_RQC1R_FPRQ_SHIFT; - temp = (temp & EQOS_MAC_RQC1R_FPRQ); - val |= temp; - /* update RQ in OSI CORE struct */ - osi_core->residual_queue = fpe->rq; - osi_writela(osi_core, val, - (unsigned char *)osi_core->base + EQOS_MAC_RQC1R); - - /* initiate SVER for SMD-V and SMD-R */ - val = osi_readla(osi_core, - (unsigned char *)osi_core->base + EQOS_MTL_FPE_CTS); - val |= EQOS_MAC_FPE_CTS_SVER; - osi_writela(osi_core, val, - (unsigned char *)osi_core->base + EQOS_MAC_FPE_CTS); + /* Read MAC_RxQ_Ctrl1 */ + 
value = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_RQC1R); + if (enable == OSI_DISABLE) { + /** Reset OMCBCQ bit to disable over-riding the MCBC Queue + * priority for the PTP RX queue. + */ + value &= ~EQOS_MAC_RQC1R_OMCBCQ; - val = osi_readla(osi_core, - (unsigned char *)osi_core->base + EQOS_MTL_FPE_ADV); - val &= ~EQOS_MTL_FPE_ADV_HADV_MASK; - /* (minimum_fragment_size +IPG/EIPG + Preamble) *.8 ~98ns for10G */ - val |= EQOS_MTL_FPE_ADV_HADV_VAL; - osi_writela(osi_core, val, - (unsigned char *)osi_core->base + EQOS_MTL_FPE_ADV); + } else { + /* Program PTPQ with ptp_rxq */ + osi_core->ptp_config.ptp_rx_queue = rxq_idx; + value &= ~EQOS_MAC_RQC1R_PTPQ; + value |= (rxq_idx << EQOS_MAC_RQC1R_PTPQ_SHIFT); + /* Reset TPQC before setting TPQC0 */ + value &= ~EQOS_MAC_RQC1R_TPQC; + /** Set TPQC to 0x1 for VLAN Tagged PTP over + * ethernet packets are routed to Rx Queue specified + * by PTPQ field + **/ + value |= EQOS_MAC_RQC1R_TPQC0; + /** Set OMCBCQ bit to enable over-riding the MCBC Queue + * priority for the PTP RX queue. 
+ */ + value |= EQOS_MAC_RQC1R_OMCBCQ; + } + /* Write MAC_RxQ_Ctrl1 */ + osi_writela(osi_core, value, base + EQOS_MAC_RQC1R); return 0; } +#endif /* !OSI_STRIPPED_LIB */ /** \cond DO_NOT_DOCUMENT */ /** @@ -5205,13 +2661,15 @@ static inline nve32_t poll_for_mii_idle(struct osi_core_priv_data *osi_core) nveu32_t mac_gmiiar; nveu32_t count; nve32_t cond = COND_NOT_MET; + nve32_t ret = 0; count = 0; while (cond == COND_NOT_MET) { if (count > retry) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "MII operation timed out\n", 0ULL); - return -1; + ret = -1; + goto fail; } count++; @@ -5225,8 +2683,8 @@ static inline nve32_t poll_for_mii_idle(struct osi_core_priv_data *osi_core) osi_core->osd_ops.udelay(10U); } } - - return 0; +fail: + return ret; } /** \endcond */ @@ -5276,7 +2734,7 @@ static nve32_t eqos_write_phy_reg(struct osi_core_priv_data *const osi_core, ret = poll_for_mii_idle(osi_core); if (ret < 0) { /* poll_for_mii_idle fail */ - return ret; + goto fail; } /* C45 register access */ @@ -5329,7 +2787,9 @@ static nve32_t eqos_write_phy_reg(struct osi_core_priv_data *const osi_core, osi_writela(osi_core, mac_gmiiar, (nveu8_t *)osi_core->base + EQOS_MAC_MDIO_ADDRESS); /* wait for MII write operation to complete */ - return poll_for_mii_idle(osi_core); + ret = poll_for_mii_idle(osi_core); +fail: + return ret; } /** @@ -5377,7 +2837,7 @@ static nve32_t eqos_read_phy_reg(struct osi_core_priv_data *const osi_core, ret = poll_for_mii_idle(osi_core); if (ret < 0) { /* poll_for_mii_idle fail */ - return ret; + goto fail; } /* C45 register access */ if ((phyreg & OSI_MII_ADDR_C45) == OSI_MII_ADDR_C45) { @@ -5424,14 +2884,16 @@ static nve32_t eqos_read_phy_reg(struct osi_core_priv_data *const osi_core, ret = poll_for_mii_idle(osi_core); if (ret < 0) { /* poll_for_mii_idle fail */ - return ret; + goto fail; } mac_gmiidr = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_MDIO_DATA); data = (mac_gmiidr & EQOS_MAC_GMIIDR_GD_MASK); - return (nve32_t)data; + ret = 
(nve32_t)data; +fail: + return ret; } /** @@ -5485,12 +2947,23 @@ static nveu32_t eqos_write_reg(struct osi_core_priv_data *const osi_core, * - Initialization: Yes * - Run time: Yes * - De-initialization: Yes - * @retval data from register on success + * @retval data from register on success and 0xffffffff on failure */ static nveu32_t eqos_read_macsec_reg(struct osi_core_priv_data *const osi_core, const nve32_t reg) { - return osi_readla(osi_core, (nveu8_t *)osi_core->macsec_base + reg); + nveu32_t ret = 0; + + if (osi_core->macsec_ops != OSI_NULL) { + ret = osi_readla(osi_core, (nveu8_t *)osi_core->macsec_base + + reg); + } else { + /* macsec is not supported or not enabled in DT */ + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "read reg failed", 0ULL); + ret = 0xffffffff; + } + return ret; } /** @@ -5505,13 +2978,23 @@ static nveu32_t eqos_read_macsec_reg(struct osi_core_priv_data *const osi_core, * - Initialization: Yes * - Run time: Yes * - De-initialization: Yes - * @retval 0 + * @retval 0 on success or 0xffffffff on error */ static nveu32_t eqos_write_macsec_reg(struct osi_core_priv_data *const osi_core, const nveu32_t val, const nve32_t reg) { - osi_writela(osi_core, val, (nveu8_t *)osi_core->macsec_base + reg); - return 0; + nveu32_t ret = 0; + + if (osi_core->macsec_ops != OSI_NULL) { + osi_writela(osi_core, val, (nveu8_t *)osi_core->macsec_base + + reg); + } else { + /* macsec is not supported or not enabled in DT */ + OSI_CORE_ERR(osi_core->osd, + OSI_LOG_ARG_HW_FAIL, "write reg failed", 0ULL); + ret = 0xffffffff; + } + return ret; } #endif /* MACSEC_SUPPORT */ @@ -5549,67 +3032,6 @@ static inline void eqos_disable_tx_lpi( (nveu8_t *)addr + EQOS_MAC_LPI_CSR); } -/** - * @brief Read-validate HW registers for functional safety. - * - * @note - * Algorithm: - * - Reads pre-configured list of MAC/MTL configuration registers - * and compares with last written value for any modifications. - * - * @param[in] osi_core: OSI core private data structure. 
- * - * @pre - * - MAC has to be out of reset. - * - osi_hw_core_init has to be called. Internally this would initialize - * the safety_config (see osi_core_priv_data) based on MAC version and - * which specific registers needs to be validated periodically. - * - Invoke this call if (osi_core_priv_data->safety_config != OSI_NULL) - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t eqos_validate_core_regs( - struct osi_core_priv_data *const osi_core) -{ - struct core_func_safety *config = - (struct core_func_safety *)osi_core->safety_config; - nveu32_t cur_val; - nveu32_t i; - - osi_lock_irq_enabled(&config->core_safety_lock); - for (i = EQOS_MAC_MCR_IDX; i < EQOS_MAX_CORE_SAFETY_REGS; i++) { - if (config->reg_addr[i] == OSI_NULL) { - continue; - } - cur_val = osi_readla(osi_core, - (nveu8_t *)config->reg_addr[i]); - cur_val &= config->reg_mask[i]; - - if (cur_val == config->reg_val[i]) { - continue; - } else { - /* Register content differs from what was written. - * Return error and let safety manager (NVGaurd etc.) - * take care of corrective action. 
- */ - osi_unlock_irq_enabled(&config->core_safety_lock); - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "register mismatch\n", 0ULL); - return -1; - } - } - osi_unlock_irq_enabled(&config->core_safety_lock); - - return 0; -} - /** * @brief eqos_config_rx_crc_check - Configure CRC Checking for Rx Packets * @@ -5642,7 +3064,7 @@ static nve32_t eqos_config_rx_crc_check( /* return on invalid argument */ if ((crc_chk != OSI_ENABLE) && (crc_chk != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "rx_crc: invalid input\n", 0ULL); return -1; } @@ -5699,7 +3121,7 @@ static nve32_t eqos_config_tx_status(struct osi_core_priv_data *const osi_core, /* don't allow if tx_status is other than 0 or 1 */ if ((tx_status != OSI_ENABLE) && (tx_status != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "tx_status: invalid input\n", 0ULL); return -1; } @@ -5728,6 +3150,7 @@ static nve32_t eqos_config_tx_status(struct osi_core_priv_data *const osi_core, return 0; } +#endif /* !OSI_STRIPPED_LIB */ /** * @brief eqos_set_avb_algorithm - Set TxQ/TC avb config @@ -5771,21 +3194,21 @@ static nve32_t eqos_set_avb_algorithm( if (avb == OSI_NULL) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "avb structure is NULL\n", 0ULL); - return ret; + goto done; } /* queue index in range */ if (avb->qindex >= OSI_EQOS_MAX_NUM_QUEUES) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Queue index\n", (nveul64_t)avb->qindex); - return ret; + goto done; } /* queue oper_mode in range check*/ if (avb->oper_mode >= OSI_MTL_QUEUE_MODEMAX) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Queue mode\n", (nveul64_t)avb->qindex); - return ret; + goto done; } /* can't set AVB mode for queue 0 */ @@ -5793,7 +3216,7 @@ static nve32_t eqos_set_avb_algorithm( OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OPNOTSUPP, "Not allowed to set AVB for Q0\n", (nveul64_t)avb->qindex); - 
return ret; + goto done; } qinx = avb->qindex; @@ -5803,9 +3226,7 @@ static nve32_t eqos_set_avb_algorithm( /* Set TxQ/TC mode as per input struct after masking 3 bit */ value |= (avb->oper_mode << EQOS_MTL_TXQEN_MASK_SHIFT) & EQOS_MTL_TXQEN_MASK; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MTL_CHX_TX_OP_MODE(qinx), - EQOS_MTL_CH0_TX_OP_MODE_IDX + qinx); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_CHX_TX_OP_MODE(qinx)); /* Set Algo and Credit control */ value = OSI_DISABLE; @@ -5829,10 +3250,8 @@ static nve32_t eqos_set_avb_algorithm( EQOS_MTL_TXQ_QW(qinx)); value &= ~EQOS_MTL_TXQ_ETS_QW_ISCQW_MASK; value |= avb->idle_slope & EQOS_MTL_TXQ_ETS_QW_ISCQW_MASK; - eqos_core_safety_writel(osi_core, value, - (nveu8_t *)osi_core->base + - EQOS_MTL_TXQ_QW(qinx), - EQOS_MTL_TXQ0_QW_IDX + qinx); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + EQOS_MTL_TXQ_QW(qinx)); /* Set Hi credit */ value = avb->hi_credit & EQOS_MTL_TXQ_ETS_HCR_HC_MASK; @@ -5845,9 +3264,24 @@ static nve32_t eqos_set_avb_algorithm( value = avb->low_credit & EQOS_MTL_TXQ_ETS_LCR_LC_MASK; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MTL_TXQ_ETS_LCR(qinx)); + } else { + /* Reset register values to POR/initialized values */ + osi_writela(osi_core, OSI_DISABLE, (nveu8_t *)osi_core->base + + EQOS_MTL_TXQ_ETS_SSCR(qinx)); + + osi_writela(osi_core, EQOS_MTL_TXQ_QW_ISCQW, + (nveu8_t *)osi_core->base + EQOS_MTL_TXQ_QW(qinx)); + + osi_writela(osi_core, OSI_DISABLE, (nveu8_t *)osi_core->base + + EQOS_MTL_TXQ_ETS_HCR(qinx)); + + osi_writela(osi_core, OSI_DISABLE, (nveu8_t *)osi_core->base + + EQOS_MTL_TXQ_ETS_LCR(qinx)); } - return 0; + ret = 0; +done: + return ret; } /** @@ -5891,13 +3325,13 @@ static nve32_t eqos_get_avb_algorithm(struct osi_core_priv_data *const osi_core, if (avb == OSI_NULL) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "avb structure is NULL\n", 0ULL); - return ret; + goto done; } if (avb->qindex >= 
OSI_EQOS_MAX_NUM_QUEUES) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Queue index\n", (nveul64_t)avb->qindex); - return ret; + goto done; } qinx = avb->qindex; @@ -5938,9 +3372,13 @@ static nve32_t eqos_get_avb_algorithm(struct osi_core_priv_data *const osi_core, EQOS_MTL_TXQ_ETS_LCR(qinx)); avb->low_credit = value & EQOS_MTL_TXQ_ETS_LCR_LC_MASK; - return 0; + ret = 0; + +done: + return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief eqos_config_arp_offload - Enable/Disable ARP offload * @@ -5995,7 +3433,7 @@ static nve32_t eqos_config_arp_offload( EQOS_5_00_MAC_ARPPA); } else { /* Unsupported MAC ver */ - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "arp_offload: invalid HW\n", 0ULL); return -1; } @@ -6005,9 +3443,7 @@ static nve32_t eqos_config_arp_offload( mac_mcr &= ~EQOS_MCR_ARPEN; } - eqos_core_safety_writel(osi_core, mac_mcr, - (nveu8_t *)addr + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); + osi_writela(osi_core, mac_mcr, (nveu8_t *)addr + EQOS_MAC_MCR); return 0; } @@ -6049,21 +3485,21 @@ static nve32_t eqos_config_vlan_filtering( if ((filter_enb_dis != OSI_ENABLE) && (filter_enb_dis != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "vlan_filter: invalid input\n", 0ULL); return -1; } if ((perfect_hash_filtering != OSI_ENABLE) && (perfect_hash_filtering != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "vlan_filter: invalid input\n", 0ULL); return -1; } if ((perfect_inverse_match != OSI_ENABLE) && (perfect_inverse_match != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "vlan_filter: invalid input\n", 0ULL); return -1; } @@ -6071,8 +3507,7 @@ static nve32_t eqos_config_vlan_filtering( value = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_PFR); value &= ~(EQOS_MAC_PFR_VTFE); value |= ((filter_enb_dis << 
EQOS_MAC_PFR_SHIFT) & EQOS_MAC_PFR_VTFE); - eqos_core_safety_writel(osi_core, value, (nveu8_t *)base + EQOS_MAC_PFR, - EQOS_MAC_PFR_IDX); + osi_writela(osi_core, value, (nveu8_t *)base + EQOS_MAC_PFR); value = osi_readla(osi_core, (nveu8_t *)base + EQOS_MAC_VLAN_TR); value &= ~(EQOS_MAC_VLAN_TR_VTIM | EQOS_MAC_VLAN_TR_VTHM); @@ -6177,74 +3612,6 @@ static void eqos_configure_eee(struct osi_core_priv_data *const osi_core, } } -/** - * @brief Function to store a backup of MAC register space during SOC suspend. - * - * @note - * Algorithm: - * - Read registers to be backed up as per struct core_backup and - * store the register values in memory. - * - * @param[in] osi_core: OSI core private data structure. - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on Success - */ -static inline nve32_t eqos_save_registers( - struct osi_core_priv_data *const osi_core) -{ - nveu32_t i; - struct core_backup *config = &osi_core->backup_config; - - for (i = 0; i < EQOS_MAX_BAK_IDX; i++) { - if (config->reg_addr[i] != OSI_NULL) { - config->reg_val[i] = osi_readla(osi_core, - config->reg_addr[i]); - } - } - - return 0; -} - -/** - * @brief Function to restore the backup of MAC registers during SOC resume. - * - * @note - * Algorithm: - * - Restore the register values from the in memory backup taken using - * eqos_save_registers(). - * - * @param[in] osi_core: OSI core private data structure. 
- * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on Success - */ -static inline nve32_t eqos_restore_registers( - struct osi_core_priv_data *const osi_core) -{ - nveu32_t i; - struct core_backup *config = &osi_core->backup_config; - - for (i = 0; i < EQOS_MAX_BAK_IDX; i++) { - if (config->reg_addr[i] != OSI_NULL) { - osi_writela(osi_core, config->reg_val[i], - config->reg_addr[i]); - } - } - - return 0; -} - /** * @brief eqos_set_mdc_clk_rate - Derive MDC clock based on provided AXI_CBB clk * @@ -6347,9 +3714,7 @@ static nve32_t eqos_config_mac_loopback( (nveu8_t *)addr + EQOS_CLOCK_CTRL_0); /* Write to MAC Configuration Register */ - eqos_core_safety_writel(osi_core, mcr_val, - (nveu8_t *)addr + EQOS_MAC_MCR, - EQOS_MAC_MCR_IDX); + osi_writela(osi_core, mcr_val, (nveu8_t *)addr + EQOS_MAC_MCR); return 0; } @@ -6363,10 +3728,10 @@ static nve32_t eqos_get_hw_features(struct osi_core_priv_data *const osi_core, nveu32_t mac_hfr2 = 0; nveu32_t mac_hfr3 = 0; - mac_hfr0 = eqos_read_reg(osi_core, EQOS_MAC_HFR0); - mac_hfr1 = eqos_read_reg(osi_core, EQOS_MAC_HFR1); - mac_hfr2 = eqos_read_reg(osi_core, EQOS_MAC_HFR2); - mac_hfr3 = eqos_read_reg(osi_core, EQOS_MAC_HFR3); + mac_hfr0 = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_HFR0); + mac_hfr1 = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_HFR1); + mac_hfr2 = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_HFR2); + mac_hfr3 = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_HFR3); hw_feat->mii_sel = ((mac_hfr0 >> EQOS_MAC_HFR0_MIISEL_SHIFT) & EQOS_MAC_HFR0_MIISEL_MASK); @@ -6496,8 +3861,8 @@ static nve32_t eqos_get_hw_features(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int eqos_padctl_rx_pins(struct osi_core_priv_data *const osi_core, - unsigned int enable) +static nve32_t eqos_padctl_rx_pins(struct osi_core_priv_data *const osi_core, + nveu32_t enable) { nveu32_t value; void *pad_addr = osi_core->padctrl.padctrl_base; @@ -6532,7 +3897,7 @@ static int eqos_padctl_rx_pins(struct osi_core_priv_data *const osi_core, osi_writela(osi_core, value, (nveu8_t *)pad_addr + osi_core->padctrl.offset_rd3); } else { - value = osi_readla(osi_core, (unsigned char *)pad_addr + + value = osi_readla(osi_core, (nveu8_t *)pad_addr + osi_core->padctrl.offset_rx_ctl); value &= ~EQOS_PADCTL_EQOS_E_INPUT; osi_writela(osi_core, value, (nveu8_t *)pad_addr + @@ -6574,7 +3939,7 @@ static int eqos_padctl_rx_pins(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. */ -static inline int poll_for_mac_tx_rx_idle(struct osi_core_priv_data *osi_core) +static inline nve32_t poll_for_mac_tx_rx_idle(struct osi_core_priv_data *osi_core) { nveu32_t retry = 0; nveu32_t mac_debug; @@ -6621,7 +3986,7 @@ static inline int poll_for_mac_tx_rx_idle(struct osi_core_priv_data *osi_core) * @retval negative value on failure. 
*/ -static int eqos_pre_pad_calibrate(struct osi_core_priv_data *const osi_core) +static nve32_t eqos_pre_pad_calibrate(struct osi_core_priv_data *const osi_core) { nve32_t ret = 0; nveu32_t value; @@ -6630,9 +3995,8 @@ static int eqos_pre_pad_calibrate(struct osi_core_priv_data *const osi_core) /* Read MAC IMR Register */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); value &= ~(EQOS_IMR_RGSMIIIE); - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_IMR, EQOS_MAC_IMR_IDX); - eqos_stop_mac(osi_core); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); + hw_stop_mac(osi_core); ret = poll_for_mii_idle(osi_core); if (ret < 0) { goto error; @@ -6656,7 +4020,7 @@ static int eqos_pre_pad_calibrate(struct osi_core_priv_data *const osi_core) return ret; error: /* roll back on fail */ - eqos_start_mac(osi_core); + hw_start_mac(osi_core); if (osi_core->osd_ops.padctrl_mii_rx_pins != OSI_NULL) { (void)osi_core->osd_ops.padctrl_mii_rx_pins(osi_core->osd, OSI_ENABLE); @@ -6666,10 +4030,9 @@ static int eqos_pre_pad_calibrate(struct osi_core_priv_data *const osi_core) /* Enable MAC RGSMIIIE - RGMII/SMII interrupts */ /* Read MAC IMR Register */ - value = osi_readl((unsigned char *)osi_core->base + EQOS_MAC_IMR); + value = osi_readl((nveu8_t *)osi_core->base + EQOS_MAC_IMR); value |= EQOS_IMR_RGSMIIIE; - eqos_core_safety_writel(osi_core, value, (nveu8_t *)osi_core->base + - EQOS_MAC_IMR, EQOS_MAC_IMR_IDX); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); return ret; } @@ -6721,15 +4084,15 @@ static nve32_t eqos_post_pad_calibrate( /* do nothing */ } } - eqos_start_mac(osi_core); + hw_start_mac(osi_core); /* Enable MAC RGSMIIIE - RGMII/SMII interrupts */ mac_imr |= EQOS_IMR_RGSMIIIE; - eqos_core_safety_writel(osi_core, mac_imr, (nveu8_t *)osi_core->base + - EQOS_MAC_IMR, EQOS_MAC_IMR_IDX); + osi_writela(osi_core, mac_imr, (nveu8_t *)osi_core->base + EQOS_MAC_IMR); return ret; } 
#endif /* UPDATED_PAD_CAL */ +#ifndef OSI_STRIPPED_LIB /** * @brief eqos_config_rss - Configure RSS * @@ -6739,15 +4102,17 @@ static nve32_t eqos_post_pad_calibrate( * * @retval -1 Always */ -static nve32_t eqos_config_rss(struct osi_core_priv_data *const osi_core) +static nve32_t eqos_config_rss(struct osi_core_priv_data *osi_core) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + (void) osi_core; + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "RSS not supported by EQOS\n", 0ULL); return -1; } +#endif /* !OSI_STRIPPED_LIB */ -#ifdef MACSEC_SUPPORT +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) /** * @brief eqos_config_for_macsec - Configure MAC according to macsec IAS * @@ -6777,9 +4142,9 @@ static void eqos_config_for_macsec(struct osi_core_priv_data *const osi_core, nveu32_t value = 0U, temp = 0U; if ((enable != OSI_ENABLE) && (enable != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Failed to config EQOS per MACSEC\n", 0ULL); - return; + goto done; } if (osi_core->mac_ver == OSI_EQOS_MAC_5_30) { /* stop MAC Tx */ @@ -6847,90 +4212,54 @@ static void eqos_config_for_macsec(struct osi_core_priv_data *const osi_core, OSI_LOG_ARG_HW_FAIL, "Error: osi_core->hw_feature is NULL\n", 0ULL); } +done: + return; } #endif /* MACSEC_SUPPORT */ -/** - * @brief eqos_get_core_safety_config - EQOS MAC safety configuration - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -void *eqos_get_core_safety_config(void) -{ - return &eqos_core_safety_config; -} - void eqos_init_core_ops(struct core_ops *ops) { - ops->poll_for_swr = eqos_poll_for_swr; ops->core_init = eqos_core_init; - ops->core_deinit = eqos_core_deinit; - ops->start_mac = eqos_start_mac; - ops->stop_mac = eqos_stop_mac; ops->handle_common_intr = eqos_handle_common_intr; - ops->set_mode = eqos_set_mode; - ops->set_speed = eqos_set_speed; ops->pad_calibrate = eqos_pad_calibrate; - 
ops->config_fw_err_pkts = eqos_config_fw_err_pkts; - ops->config_rxcsum_offload = eqos_config_rxcsum_offload; - ops->config_mac_pkt_filter_reg = eqos_config_mac_pkt_filter_reg; ops->update_mac_addr_low_high_reg = eqos_update_mac_addr_low_high_reg; - ops->config_l3_l4_filter_enable = eqos_config_l3_l4_filter_enable; - ops->config_l3_filters = eqos_config_l3_filters; - ops->update_ip4_addr = eqos_update_ip4_addr; - ops->update_ip6_addr = eqos_update_ip6_addr; - ops->config_l4_filters = eqos_config_l4_filters; - ops->update_l4_port_no = eqos_update_l4_port_no; - ops->set_systime_to_mac = eqos_set_systime_to_mac; - ops->config_addend = eqos_config_addend; ops->adjust_mactime = eqos_adjust_mactime; - ops->config_tscr = eqos_config_tscr; - ops->config_ssir = eqos_config_ssir; ops->read_mmc = eqos_read_mmc; ops->write_phy_reg = eqos_write_phy_reg; ops->read_phy_reg = eqos_read_phy_reg; + ops->get_hw_features = eqos_get_hw_features; ops->read_reg = eqos_read_reg; ops->write_reg = eqos_write_reg; + ops->set_avb_algorithm = eqos_set_avb_algorithm; + ops->get_avb_algorithm = eqos_get_avb_algorithm; + ops->config_frp = eqos_config_frp; + ops->update_frp_entry = eqos_update_frp_entry; + ops->update_frp_nve = eqos_update_frp_nve; #ifdef MACSEC_SUPPORT ops->read_macsec_reg = eqos_read_macsec_reg; ops->write_macsec_reg = eqos_write_macsec_reg; +#ifndef OSI_STRIPPED_LIB + ops->macsec_config_mac = eqos_config_for_macsec; +#endif /* !OSI_STRIPPED_LIB */ #endif /* MACSEC_SUPPORT */ - ops->get_hw_features = eqos_get_hw_features; + ops->config_l3l4_filters = eqos_config_l3l4_filters; #ifndef OSI_STRIPPED_LIB ops->config_tx_status = eqos_config_tx_status; ops->config_rx_crc_check = eqos_config_rx_crc_check; ops->config_flow_control = eqos_config_flow_control; ops->config_arp_offload = eqos_config_arp_offload; ops->config_ptp_offload = eqos_config_ptp_offload; - ops->validate_regs = eqos_validate_core_regs; - ops->flush_mtl_tx_queue = eqos_flush_mtl_tx_queue; - ops->set_avb_algorithm = 
eqos_set_avb_algorithm; - ops->get_avb_algorithm = eqos_get_avb_algorithm; ops->config_vlan_filtering = eqos_config_vlan_filtering; ops->reset_mmc = eqos_reset_mmc; ops->configure_eee = eqos_configure_eee; - ops->save_registers = eqos_save_registers; - ops->restore_registers = eqos_restore_registers; ops->set_mdc_clk_rate = eqos_set_mdc_clk_rate; ops->config_mac_loopback = eqos_config_mac_loopback; -#endif /* !OSI_STRIPPED_LIB */ - ops->hw_config_est = eqos_hw_config_est; - ops->hw_config_fpe = eqos_hw_config_fpe; - ops->config_ptp_rxq = eqos_config_ptp_rxq; - ops->config_frp = eqos_config_frp; - ops->update_frp_entry = eqos_update_frp_entry; - ops->update_frp_nve = eqos_update_frp_nve; ops->config_rss = eqos_config_rss; -#ifdef MACSEC_SUPPORT - ops->macsec_config_mac = eqos_config_for_macsec; -#endif /* MACSEC_SUPPORT */ - ops->ptp_tsc_capture = eqos_ptp_tsc_capture; + ops->config_ptp_rxq = eqos_config_ptp_rxq; +#endif /* !OSI_STRIPPED_LIB */ #ifdef HSI_SUPPORT ops->core_hsi_configure = eqos_hsi_configure; + ops->core_hsi_inject_err = eqos_hsi_inject_err; #endif } diff --git a/kernel/nvethernetrm/osi/core/eqos_core.h b/kernel/nvethernetrm/osi/core/eqos_core.h index c3b503a6ff..68000e8057 100644 --- a/kernel/nvethernetrm/osi/core/eqos_core.h +++ b/kernel/nvethernetrm/osi/core/eqos_core.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,6 +24,95 @@ #define INCLUDED_EQOS_CORE_H #ifndef OSI_STRIPPED_LIB +#define EQOS_MAC_PFR 0x0008 +#define EQOS_MAC_LPI_CSR 0x00D0 +#define EQOS_MAC_LPI_TIMER_CTRL 0x00D4 +#define EQOS_MAC_LPI_EN_TIMER 0x00D8 +#define EQOS_MAC_RX_FLW_CTRL 0x0090 +#define EQOS_MAC_STNSR 0x0B0C +#define EQOS_MAC_STSR 0x0B08 +#define EQOS_MAC_MA0LR 0x0304 +#define EQOS_MAC_PIDR0 0x0BC4 +#define EQOS_MAC_PTO_CR 0x0BC0 +#define EQOS_MAC_PIDR1 0x0BC8 +#define EQOS_MAC_PIDR2 0x0BCC +#define EQOS_MAC_PMTCSR 0x00C0 +#define EQOS_MAC_QX_TX_FLW_CTRL(x) ((0x0004U * (x)) + 0x0070U) +#define EQOS_MAC_MA0HR 0x0300 +#define EQOS_4_10_MAC_ARPPA 0x0AE0 +#define EQOS_5_00_MAC_ARPPA 0x0210 +#define EQOS_CLOCK_CTRL_0 0x8000U +#define EQOS_APB_ERR_STATUS 0x8214U + +#define EQOS_MAC_PFR_VTFE OSI_BIT(16) +#define EQOS_MAC_PFR_IPFE OSI_BIT(20) +#define EQOS_MAC_PFR_IPFE_SHIFT 20U +#define EQOS_MAC_MA0HR_IDX 11U +#define EQOS_5_30_SID 0x3U +#define EQOS_5_30_SID_CH3 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_24) +#define EQOS_5_30_SID_CH2 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_16) +#define EQOS_5_30_SID_CH1 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_8) +#define EQOS_5_30_SID_CH7 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_24) +#define EQOS_5_30_SID_CH6 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_16) +#define EQOS_5_30_SID_CH5 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_8) +#define EQOS_5_30_ASID_CTRL_VAL ((EQOS_5_30_SID_CH3) |\ + (EQOS_5_30_SID_CH2) |\ + (EQOS_5_30_SID_CH1) |\ + (EQOS_5_30_SID)) +#define EQOS_5_30_ASID1_CTRL_VAL ((EQOS_5_30_SID_CH7) |\ + (EQOS_5_30_SID_CH6) |\ + (EQOS_5_30_SID_CH5) |\ + (EQOS_5_30_SID)) +#define EQOS_MAC_MA0HR_MASK 0xFFFFFU +#define EQOS_MAC_IMR_MASK 0x67039U +#define EQOS_MAC_HTR_MASK 0xFFFFFFFFU +#define EQOS_MAC_HTR0_IDX 2U +#define EQOS_MAC_HTR_REG(x) ((0x0004U * (x)) + 0x0010U) +#define EQOS_DMA_SBUS_MASK 0xDF1F3CFFU +#define 
EQOS_DMA_CHX_STATUS_FBE OSI_BIT(10) +#define EQOS_DMA_CHX_STATUS_TBU OSI_BIT(2) +#define EQOS_DMA_CHX_STATUS_RBU OSI_BIT(7) +#define EQOS_DMA_CHX_STATUS_RPS OSI_BIT(8) +#define EQOS_DMA_CHX_STATUS_RWT OSI_BIT(9) +#define EQOS_DMA_CHX_STATUS_TPS OSI_BIT(1) +#define EQOS_MAC_RQC0R_MASK 0xFFU +#define EQOS_MAC_QX_TX_FLW_CTRL_TFE OSI_BIT(1) +#define EQOS_MAC_QX_TXFC_MASK 0xFFFF00F2U +#define EQOS_MAC_Q0_TXFC_IDX 6U +#define EQOS_MAC_PTO_CR_ASYNCEN OSI_BIT(1) +#define EQOS_MAC_RQC1R_OMCBCQ OSI_BIT(28) +#define EQOS_MAC_PIDR_PID_MASK 0XFFFFU +#define EQOS_MAC_PFR_MASK 0x803107FFU +#define EQOS_MAC_PAUSE_TIME 0xFFFF0000U +#define EQOS_MAC_PAUSE_TIME_MASK 0xFFFF0000U +#define EQOS_MAC_MCR_MASK 0xFFFFFF7FU +#define EQOS_MAC_MA0LR_IDX 12U +#define EQOS_MAC_MA0LR_MASK 0xFFFFFFFFU +#define EQOS_MAC_PTO_CR_DN (OSI_BIT(15) | OSI_BIT(14) | \ + OSI_BIT(13) | OSI_BIT(12) | \ + OSI_BIT(11) | OSI_BIT(10) | \ + OSI_BIT(9) | OSI_BIT(8)) +#define EQOS_MAC_PTO_CR_DN_SHIFT 8U +#define EQOS_MAC_PTO_CR_APDREQEN OSI_BIT(2) +#define EQOS_MAC_PTO_CR_PTOEN OSI_BIT(0) + +#define EQOS_MCR_IPG_MASK 0x7000000U +#define EQOS_MCR_IPG_SHIFT 24U +#define EQOS_MCR_IPG 0x7U +#define EQOS_MAC_TCR_TSENMACADDR OSI_BIT(18) +#define EQOS_MAC_TCR_SNAPTYPSEL_SHIFT 16U +#define EQOS_MAC_TAR_IDX 15U +#define EQOS_MAC_SSIR_IDX 14U +#define EQOS_MAC_RX_FLW_CTRL_RFE OSI_BIT(0) +#define EQOS_MAC_TCR_MASK 0x1107FF03U +#define EQOS_MAC_TAR_MASK 0xFFFFFFFFU +#define EQOS_MAC_SSIR_MASK 0xFFFF00U +#define EQOS_MAC_RQC2R_MASK 0xFFFFFFFFU +#define EQOS_MAC_RQC1R_TPQC (OSI_BIT(22) | OSI_BIT(23)) +#define EQOS_MAC_RQC1R_TPQC0 OSI_BIT(22) +#define EQOS_MAC_RQC1R_PTPQ (OSI_BIT(6) | OSI_BIT(5) | \ + OSI_BIT(4)) +#define EQOS_MAC_RQC1R_PTPQ_SHIFT 4U /** * @addtogroup EQOS-MDC MDC Clock Selection defines * @@ -39,35 +128,117 @@ #define EQOS_CSR_300_500M 0x6 /* MDC = clk_csr/204 */ #define EQOS_CSR_500_800M 0x7 /* MDC = clk_csr/324 */ /** @} */ +#define EQOS_MAC_LPI_CSR_LPITE OSI_BIT(20) +#define EQOS_MAC_LPI_CSR_LPITXA OSI_BIT(19) 
+#define EQOS_MAC_LPI_CSR_PLS OSI_BIT(17) +#define EQOS_MAC_LPI_CSR_LPIEN OSI_BIT(16) #endif /* !OSI_STRIPPED_LIB */ +#define EQOS_MTL_EST_CONTROL 0x0C50 +#define EQOS_MTL_EST_OVERHEAD 0x0C54 +#define EQOS_MTL_EST_STATUS 0x0C58 +#define EQOS_MTL_EST_SCH_ERR 0x0C60 +#define EQOS_MTL_EST_FRMS_ERR 0x0C64 +#define EQOS_MTL_EST_ITRE 0x0C70 +#define EQOS_MTL_EST_GCL_CONTROL 0x0C80 +#define EQOS_MTL_EST_DATA 0x0C84 +#define EQOS_MTL_FPE_CTS 0x0C90 +#define EQOS_MTL_FPE_ADV 0x0C94 +#define EQOS_MTL_RXP_CS 0x0CA0 +#define EQOS_MTL_RXP_INTR_CS 0x0CA4 +#define EQOS_MTL_RXP_IND_CS 0x0CB0 +#define EQOS_MTL_RXP_IND_DATA 0x0CB4 +#define EQOS_MTL_TXQ_ETS_CR(x) ((0x0040U * (x)) + 0x0D10U) +#define EQOS_MTL_TXQ_ETS_SSCR(x) ((0x0040U * (x)) + 0x0D1CU) +#define EQOS_MTL_TXQ_ETS_HCR(x) ((0x0040U * (x)) + 0x0D20U) +#define EQOS_MTL_TXQ_ETS_LCR(x) ((0x0040U * (x)) + 0x0D24U) +#define EQOS_MTL_INTR_STATUS 0x0C20 +#define EQOS_MTL_OP_MODE 0x0C00 +#define EQOS_MAC_FPE_CTS 0x0234 +#define EQOS_IMR_FPEIE OSI_BIT(17) +#define EQOS_MTL_FRP_IE2_DCH_SHIFT 24U +#define EQOS_DMA_ISR_MTLIS OSI_BIT(16) /** - * @addtogroup EQOS-SIZE SIZE calculation helper Macros + * @addtogroup EQOS-MTL-FRP FRP Indirect Access register defines * - * @brief SIZE calculation defines + * @brief EQOS MTL FRP register defines * @{ */ -#define FIFO_SIZE_B(x) (x) -#define FIFO_SIZE_KB(x) ((x) * 1024U) -/** @} */ +#define EQOS_MTL_FRP_READ_UDELAY 1U +#define EQOS_MTL_FRP_READ_RETRY 10000U -/** - * @addtogroup EQOS-QUEUE QUEUE fifo size programmable values - * - * @brief Queue FIFO size programmable values - * @{ - */ -#define EQOS_256 0x00U -#define EQOS_512 0x01U -#define EQOS_1K 0x03U -#define EQOS_2K 0x07U -#define EQOS_4K 0x0FU -#define EQOS_8K 0x1FU -#define EQOS_9K 0x23U -#define EQOS_16K 0x3FU -#define EQOS_32K 0x7FU -#define EQOS_36K 0x8FU +/* FRP Control and Status register defines */ +#define EQOS_MTL_RXP_CS_RXPI OSI_BIT(31) +#define EQOS_MTL_RXP_CS_NPE (OSI_BIT(23) | OSI_BIT(22) | \ + OSI_BIT(21) | OSI_BIT(20) | \ 
+ OSI_BIT(19) | OSI_BIT(18) | \ + OSI_BIT(17) | OSI_BIT(16)) +#define EQOS_MTL_RXP_CS_NPE_SHIFT 16U +#define EQOS_MTL_RXP_CS_NVE (OSI_BIT(7) | OSI_BIT(6) | \ + OSI_BIT(5) | OSI_BIT(4) | \ + OSI_BIT(3) | OSI_BIT(2) | \ + OSI_BIT(1) | OSI_BIT(0)) +/* Indirect register defines */ +#define EQOS_MTL_RXP_IND_CS_BUSY OSI_BIT(31) +#define EQOS_MTL_RXP_IND_CS_WRRDN OSI_BIT(16) +#define EQOS_MTL_RXP_IND_CS_ADDR (OSI_BIT(9) | OSI_BIT(8) | \ + OSI_BIT(7) | OSI_BIT(6) | \ + OSI_BIT(5) | OSI_BIT(4) | \ + OSI_BIT(3) | OSI_BIT(2) | \ + OSI_BIT(1) | OSI_BIT(0)) /** @} */ +/* FRP Interrupt Control and Status register */ +#define EQOS_MTL_RXP_INTR_CS_PDRFIE OSI_BIT(19) +#define EQOS_MTL_RXP_INTR_CS_FOOVIE OSI_BIT(18) +#define EQOS_MTL_RXP_INTR_CS_NPEOVIE OSI_BIT(17) +#define EQOS_MTL_RXP_INTR_CS_NVEOVIE OSI_BIT(16) +#define EQOS_MTL_RXP_INTR_CS_PDRFIS OSI_BIT(3) +#define EQOS_MTL_RXP_INTR_CS_FOOVIS OSI_BIT(2) +#define EQOS_MTL_RXP_INTR_CS_NPEOVIS OSI_BIT(1) +#define EQOS_MTL_RXP_INTR_CS_NVEOVIS OSI_BIT(0) + +#ifndef OSI_STRIPPED_LIB +#define EQOS_RXQ_DMA_MAP0_MASK 0x13131313U +#define EQOS_MTL_TXQ_QW_MASK 0x1FFFFFU +#define EQOS_PAD_AUTO_CAL_CFG_MASK 0x7FFFFFFFU +#define EQOS_MTL_TXQ_OP_MODE_MASK 0xFF007EU +#define EQOS_MTL_RXQ_OP_MODE_MASK 0xFFFFFFBU +#define EQOS_MAC_RQC1R_MASK 0xF77077U +#endif /* !OSI_STRIPPED_LIB */ +#define EQOS_MTL_TXQ_ETS_SSCR_SSC_MASK 0x00003FFFU +#define EQOS_MTL_TXQ_ETS_QW_ISCQW_MASK 0x000FFFFFU +#define EQOS_MTL_TXQ_ETS_HCR_HC_MASK 0x1FFFFFFFU +#define EQOS_MTL_TXQ_ETS_LCR_LC_MASK 0x1FFFFFFFU +#define EQOS_MTL_TXQ_ETS_CR_CC OSI_BIT(3) +#define EQOS_MTL_TXQ_ETS_CR_AVALG OSI_BIT(2) +#define EQOS_MTL_TXQ_ETS_CR_CC_SHIFT 3U +#define EQOS_MTL_TXQ_ETS_CR_AVALG_SHIFT 2U +#define EQOS_MAC_RQC1R_FPRQ (OSI_BIT(26) | OSI_BIT(25) | \ + OSI_BIT(24)) +#define EQOS_MAC_RQC1R_FPRQ_SHIFT 24U +/* Indirect Instruction Table defines */ +#define EQOS_MTL_FRP_IE0(x) (((x) * 0x4U) + 0x0U) +#define EQOS_MTL_FRP_IE1(x) (((x) * 0x4U) + 0x1U) +#define EQOS_MTL_FRP_IE2(x) (((x) * 
0x4U) + 0x2U) +#define EQOS_MTL_FRP_IE3(x) (((x) * 0x4U) + 0x3U) +#define EQOS_MTL_FRP_IE2_DCH (OSI_BIT(31) | OSI_BIT(30) | \ + OSI_BIT(29) | OSI_BIT(28) | \ + OSI_BIT(27) | OSI_BIT(26) | \ + OSI_BIT(25) | OSI_BIT(24)) +#define EQOS_MTL_FRP_IE2_OKI (OSI_BIT(23) | OSI_BIT(22) | \ + OSI_BIT(21) | OSI_BIT(20) | \ + OSI_BIT(19) | OSI_BIT(18) | \ + OSI_BIT(17) | OSI_BIT(16)) +#define EQOS_MTL_FRP_IE2_OKI_SHIFT 16U +#define EQOS_MTL_FRP_IE2_FO (OSI_BIT(13) | OSI_BIT(12) | \ + OSI_BIT(11) | OSI_BIT(10) | \ + OSI_BIT(9) | OSI_BIT(8)) +#define EQOS_MTL_FRP_IE2_FO_SHIFT 8U +#define EQOS_MTL_FRP_IE2_NC OSI_BIT(3) +#define EQOS_MTL_FRP_IE2_IM OSI_BIT(2) +#define EQOS_MTL_FRP_IE2_RF OSI_BIT(1) +#define EQOS_MTL_FRP_IE2_AF OSI_BIT(0) + /** * @addtogroup EQOS-HW Hardware Register offsets * @@ -76,63 +247,52 @@ */ #define EQOS_MAC_MCR 0x0000 #define EQOS_MAC_EXTR 0x0004 -#define EQOS_MAC_PFR 0x0008 -#define EQOS_MAC_WATCH 0x000C -#define EQOS_MAC_HTR_REG(x) ((0x0004U * (x)) + 0x0010U) #define EQOS_MAC_VLAN_TAG 0x0050 #define EQOS_MAC_VLANTIR 0x0060 -#define EQOS_MAC_QX_TX_FLW_CTRL(x) ((0x0004U * (x)) + 0x0070U) -#define EQOS_MAC_RX_FLW_CTRL 0x0090 #define EQOS_MAC_RQC0R 0x00A0 #define EQOS_MAC_RQC1R 0x00A4 #define EQOS_MAC_RQC2R 0x00A8 #define EQOS_MAC_ISR 0x00B0 #define EQOS_MAC_IMR 0x00B4 -#define EQOS_MAC_PMTCSR 0x00C0 -#define EQOS_MAC_LPI_CSR 0x00D0 -#define EQOS_MAC_LPI_TIMER_CTRL 0x00D4 -#define EQOS_MAC_LPI_EN_TIMER 0x00D8 #ifndef OSI_STRIPPED_LIB #define EQOS_MAC_1US_TIC_CNTR 0x00DC -#endif /* !OSI_STRIPPED_LIB */ #define EQOS_MAC_ANS 0x00E4 +#endif /* !OSI_STRIPPED_LIB */ #define EQOS_MAC_PCS 0x00F8 + +#ifdef UPDATED_PAD_CAL #define EQOS_MAC_DEBUG 0x0114 +#define EQOS_MAC_DEBUG_RPESTS OSI_BIT(0) +#define EQOS_MAC_DEBUG_TPESTS OSI_BIT(16) +#endif + #define EQOS_MAC_MDIO_ADDRESS 0x0200 #define EQOS_MAC_MDIO_DATA 0x0204 -#define EQOS_5_00_MAC_ARPPA 0x0210 -#define EQOS_MAC_CSR_SW_CTL 0x0230 -#define EQOS_MAC_FPE_CTS 0x0234 -#define EQOS_MAC_MA0HR 0x0300 #define 
EQOS_MAC_ADDRH(x) ((0x0008U * (x)) + 0x0300U) -#define EQOS_MAC_MA0LR 0x0304 #define EQOS_MAC_ADDRL(x) ((0x0008U * (x)) + 0x0304U) #define EQOS_MMC_CNTRL 0x0700 #define EQOS_MMC_TX_INTR_MASK 0x0710 #define EQOS_MMC_RX_INTR_MASK 0x070C #define EQOS_MMC_IPC_RX_INTR_MASK 0x0800 #define EQOS_MAC_L3L4_CTR(x) ((0x0030U * (x)) + 0x0900U) -#define EQOS_MAC_L4_ADR(x) ((0x0030U * (x)) + 0x0904U) -#define EQOS_MAC_L3_AD0R(x) ((0x0030U * (x)) + 0x0910U) #define EQOS_MAC_L3_AD1R(x) ((0x0030U * (x)) + 0x0914U) +#ifndef OSI_STRIPPED_LIB +#define EQOS_MAC_L3_AD0R(x) ((0x0030U * (x)) + 0x0910U) #define EQOS_MAC_L3_AD2R(x) ((0x0030U * (x)) + 0x0918U) #define EQOS_MAC_L3_AD3R(x) ((0x0030U * (x)) + 0x091CU) -#define EQOS_4_10_MAC_ARPPA 0x0AE0 +#define EQOS_MAC_L4_ADR(x) ((0x0030U * (x)) + 0x0904U) +#endif /* !OSI_STRIPPED_LIB */ #define EQOS_MAC_TCR 0x0B00 #define EQOS_MAC_SSIR 0x0B04 -#define EQOS_MAC_STSR 0x0B08 -#define EQOS_MAC_STNSR 0x0B0C #define EQOS_MAC_STSUR 0x0B10 #define EQOS_MAC_STNSUR 0x0B14 #define EQOS_MAC_TAR 0x0B18 -#define EQOS_MAC_PTO_CR 0x0BC0 -#define EQOS_MAC_PIDR0 0x0BC4 -#define EQOS_MAC_PIDR1 0x0BC8 -#define EQOS_MAC_PIDR2 0x0BCC #define EQOS_MAC_PPS_CTL 0x0B70 #define EQOS_DMA_BMR 0x1000 #define EQOS_DMA_SBUS 0x1004 #define EQOS_DMA_ISR 0x1008 +#define EQOS_PTP_CLK_SPEED 208333334U +#define EQOS_X_PTP_CLK_SPEED 312500000U /** @} */ /** @@ -141,36 +301,11 @@ * @brief EQOS MTL HW Register offsets * @{ */ -#define EQOS_MTL_OP_MODE 0x0C00 -#define EQOS_MTL_INTR_STATUS 0x0C20 #define EQOS_MTL_RXQ_DMA_MAP0 0x0C30 #define EQOS_MTL_RXQ_DMA_MAP1 0x0C34 -#define EQOS_MTL_EST_CONTROL 0x0C50 -#define EQOS_MTL_EST_OVERHEAD 0x0C54 -#define EQOS_MTL_EST_STATUS 0x0C58 -#define EQOS_MTL_EST_SCH_ERR 0x0C60 -#define EQOS_MTL_EST_FRMS_ERR 0x0C64 -#define EQOS_MTL_EST_FRMC_ERR 0x0C68 -#define EQOS_MTL_EST_ITRE 0x0C70 -#define EQOS_MTL_EST_GCL_CONTROL 0x0C80 -#define EQOS_MTL_EST_DATA 0x0C84 -#define EQOS_MTL_FPE_CTS 0x0C90 -#define EQOS_MTL_FPE_ADV 0x0C94 -#define EQOS_MTL_RXP_CS 
0x0CA0 -#define EQOS_MTL_RXP_INTR_CS 0x0CA4 -#define EQOS_MTL_RXP_DROP_CNT 0x0CA8 -#define EQOS_MTL_RXP_ERROR_CNT 0x0CAC -#define EQOS_MTL_RXP_IND_CS 0x0CB0 -#define EQOS_MTL_RXP_IND_DATA 0x0CB4 #define EQOS_MTL_CHX_TX_OP_MODE(x) ((0x0040U * (x)) + 0x0D00U) -#define EQOS_MTL_TXQ_DEBUG(x) ((0x0040U * (x)) + 0x0D08U) -#define EQOS_MTL_TXQ_ETS_CR(x) ((0x0040U * (x)) + 0x0D10U) #define EQOS_MTL_TXQ_QW(x) ((0x0040U * (x)) + 0x0D18U) -#define EQOS_MTL_TXQ_ETS_SSCR(x) ((0x0040U * (x)) + 0x0D1CU) -#define EQOS_MTL_TXQ_ETS_HCR(x) ((0x0040U * (x)) + 0x0D20U) -#define EQOS_MTL_TXQ_ETS_LCR(x) ((0x0040U * (x)) + 0x0D24U) #define EQOS_MTL_CHX_RX_OP_MODE(x) ((0x0040U * (x)) + 0x0D30U) -#define EQOS_MTL_RXQ_DEBUG(x) ((0x0040U * (x)) + 0x0D38U) /** @} */ /** @@ -179,8 +314,6 @@ * @brief EQOS Wrapper register offsets * @{ */ -#define EQOS_CLOCK_CTRL_0 0x8000U -#define EQOS_APB_ERR_STATUS 0x8214U #define EQOS_AXI_ASID_CTRL 0x8400U #define EQOS_AXI_ASID1_CTRL 0x8404U #define EQOS_PAD_CRTL 0x8800U @@ -189,16 +322,15 @@ #define EQOS_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U)) #define VIRTUAL_APB_ERR_CTRL 0x8300 #define EQOS_WRAP_COMMON_INTR_ENABLE 0x8704 + +#ifdef HSI_SUPPORT #define EQOS_REGISTER_PARITY_ERR OSI_BIT(5) #define EQOS_CORE_CORRECTABLE_ERR OSI_BIT(4) #define EQOS_CORE_UNCORRECTABLE_ERR OSI_BIT(3) +#endif + #define EQOS_MAC_SBD_INTR OSI_BIT(2) #define EQOS_WRAP_COMMON_INTR_STATUS 0x8708 -#define EQOS_WRAP_SYNC_TSC_PTP_CAPTURE 0x800CU -#define EQOS_WRAP_TSC_CAPTURE_LOW 0x8010U -#define EQOS_WRAP_TSC_CAPTURE_HIGH 0x8014U -#define EQOS_WRAP_PTP_CAPTURE_LOW 0x8018U -#define EQOS_WRAP_PTP_CAPTURE_HIGH 0x801CU /** @} */ @@ -217,15 +349,13 @@ #define EQOS_PAD_AUTO_CAL_CFG_START OSI_BIT(31) #define EQOS_PAD_AUTO_CAL_STAT_ACTIVE OSI_BIT(31) #define EQOS_PAD_CRTL_E_INPUT_OR_E_PWRD OSI_BIT(31) -#define EQOS_MCR_IPG_MASK 0x7000000U -#define EQOS_MCR_IPG_SHIFT 24U -#define EQOS_MCR_IPG 0x7U +#define EQOS_PAD_CRTL_PD_OFFSET_MASK 0x1F00U +#define EQOS_PAD_CRTL_PU_OFFSET_MASK 0x1FU 
#define EQOS_MCR_IPC OSI_BIT(27) #define EQOS_MMC_CNTRL_CNTRST OSI_BIT(0) #define EQOS_MMC_CNTRL_RSTONRD OSI_BIT(2) #define EQOS_MMC_CNTRL_CNTPRST OSI_BIT(4) #define EQOS_MMC_CNTRL_CNTPRSTLVL OSI_BIT(5) -#define EQOS_MTL_QTOMR_FTQ OSI_BIT(0) #define EQOS_MTL_TSF OSI_BIT(1) #define EQOS_MTL_TXQEN OSI_BIT(3) #define EQOS_MTL_RSF OSI_BIT(5) @@ -242,12 +372,6 @@ #define EQOS_MCR_CST OSI_BIT(21) #define EQOS_MCR_GPSLCE OSI_BIT(23) #define EQOS_IMR_RGSMIIIE OSI_BIT(0) -#define EQOS_IMR_PCSLCHGIE OSI_BIT(1) -#define EQOS_IMR_PCSANCIE OSI_BIT(2) -#define EQOS_IMR_PMTIE OSI_BIT(4) -#define EQOS_IMR_LPIIE OSI_BIT(5) -#define EQOS_IMR_TXESIE OSI_BIT(13) -#define EQOS_IMR_FPEIE OSI_BIT(17) #define EQOS_MAC_PCS_LNKSTS OSI_BIT(19) #define EQOS_MAC_PCS_LNKMOD OSI_BIT(16) #define EQOS_MAC_PCS_LNKSPEED (OSI_BIT(17) | OSI_BIT(18)) @@ -260,15 +384,10 @@ #define EQOS_MAC_VLANTR_DOVLTC OSI_BIT(20) #define EQOS_MAC_VLANTR_ERIVLT OSI_BIT(27) #define EQOS_MAC_VLANTIRR_CSVL OSI_BIT(19) -#define EQOS_MAC_DEBUG_RPESTS OSI_BIT(0) -#define EQOS_MAC_DEBUG_TPESTS OSI_BIT(16) #define EQOS_DMA_SBUS_BLEN8 OSI_BIT(2) #define EQOS_DMA_SBUS_BLEN16 OSI_BIT(3) #define EQOS_DMA_SBUS_EAME OSI_BIT(11) -#define EQOS_DMA_BMR_SWR OSI_BIT(0) #define EQOS_DMA_BMR_DPSW OSI_BIT(8) -#define EQOS_MAC_RQC1R_TPQC (OSI_BIT(22) | OSI_BIT(23)) -#define EQOS_MAC_RQC1R_TPQC0 OSI_BIT(22) #define EQOS_MAC_RQC1R_MCBCQ (OSI_BIT(18) | OSI_BIT(17) |\ OSI_BIT(16)) #define EQOS_MAC_RQC1R_MCBCQ_SHIFT 16U @@ -276,162 +395,62 @@ #define EQOS_MAC_RQC1R_MCBCQ7 0x7U #define EQOS_MAC_RQC1R_MCBCQEN OSI_BIT(20) -#define EQOS_MAC_RQC1R_FPRQ (OSI_BIT(26) | OSI_BIT(25) | \ - OSI_BIT(24)) -#define EQOS_MAC_RQC1R_FPRQ_SHIFT 24U -#define EQOS_MAC_RQC1R_PTPQ (OSI_BIT(6) | OSI_BIT(5) | \ - OSI_BIT(4)) -#define EQOS_MAC_RQC1R_OMCBCQ OSI_BIT(28) -#define EQOS_MAC_RQC1R_PTPQ_SHIFT 4U -#define EQOS_MAC_PPS_CTL_PPSCTRL0 (OSI_BIT(3) | OSI_BIT(2) |\ - OSI_BIT(1) | OSI_BIT(0)) -#define EQOS_MTL_QTOMR_FTQ_LPOS OSI_BIT(0) -#define EQOS_DMA_ISR_MTLIS 
OSI_BIT(16) #define EQOS_DMA_ISR_MACIS OSI_BIT(17) + +#ifdef HSI_SUPPORT #define EQOS_DMA_ISR_TXSTSIS OSI_BIT(13) +#define EQOS_IMR_TXESIE OSI_BIT(13) +#endif + #define EQOS_MAC_ISR_RGSMIIS OSI_BIT(0) #define EQOS_MAC_IMR_FPEIS OSI_BIT(17) #define EQOS_MTL_TXQ_QW_ISCQW OSI_BIT(4) +#define EQOS_RXQ_EN_MASK (OSI_BIT(0) | OSI_BIT(1)) #define EQOS_DMA_SBUS_RD_OSR_LMT 0x001F0000U #define EQOS_DMA_SBUS_WR_OSR_LMT 0x1F000000U #define EQOS_MTL_TXQ_SIZE_SHIFT 16U #define EQOS_MTL_RXQ_SIZE_SHIFT 20U #ifndef OSI_STRIPPED_LIB #define EQOS_MAC_ENABLE_LM OSI_BIT(12) -#define EQOS_MAC_VLANTIRR_VLTI OSI_BIT(20) -#define EQOS_DMA_SBUS_BLEN4 OSI_BIT(1) -#define EQOS_IMR_LPIIE OSI_BIT(5) -#define EQOS_IMR_PCSLCHGIE OSI_BIT(1) -#define EQOS_IMR_PCSANCIE OSI_BIT(2) -#define EQOS_IMR_PMTIE OSI_BIT(4) -#define EQOS_MAC_ISR_LPIIS OSI_BIT(5) -#define EQOS_MAC_LPI_CSR_LPITE OSI_BIT(20) -#define EQOS_MAC_LPI_CSR_LPITXA OSI_BIT(19) -#define EQOS_MAC_LPI_CSR_PLS OSI_BIT(17) -#define EQOS_MAC_LPI_CSR_LPIEN OSI_BIT(16) #define EQOS_MCR_ARPEN OSI_BIT(31) #define EQOS_RX_CLK_SEL OSI_BIT(8) -#define EQOS_MTL_TXQ_ETS_SSCR_SSC_MASK 0x00003FFFU -#define EQOS_MTL_TXQ_ETS_QW_ISCQW_MASK 0x000FFFFFU -#define EQOS_MTL_TXQ_ETS_HCR_HC_MASK 0x1FFFFFFFU -#define EQOS_MTL_TXQ_ETS_LCR_LC_MASK 0x1FFFFFFFU -#define EQOS_MTL_TXQ_ETS_CR_SLC_MASK (OSI_BIT(6) | OSI_BIT(5) | \ - OSI_BIT(4)) -#define EQOS_MTL_TXQ_ETS_CR_CC OSI_BIT(3) -#define EQOS_MTL_TXQ_ETS_CR_AVALG OSI_BIT(2) -#define EQOS_MTL_TXQ_ETS_CR_CC_SHIFT 3U -#define EQOS_MTL_TXQ_ETS_CR_AVALG_SHIFT 2U -#define EQOS_MTL_TXQEN_MASK (OSI_BIT(3) | OSI_BIT(2)) -#define EQOS_MTL_TXQEN_MASK_SHIFT 2U #define EQOS_MTL_OP_MODE_DTXSTS OSI_BIT(1) #define EQOS_MAC_VLAN_TR 0x0050U -#define EQOS_MAC_VLAN_TFR 0x0054U -#define EQOS_MAC_VLAN_HTR 0x0058U -#define EQOS_MAC_VLAN_TR_ETV OSI_BIT(16) #define EQOS_MAC_VLAN_TR_VTIM OSI_BIT(17) #define EQOS_MAC_VLAN_TR_VTIM_SHIFT 17 #define EQOS_MAC_VLAN_TR_VTHM OSI_BIT(25) -#define EQOS_MAC_VLAN_TR_VL 0xFFFFU -#define 
EQOS_MAC_VLAN_HTR_VLHT 0xFFFFU #define EQOS_MAC_STNSR_TSSS_MASK 0x7FFFFFFFU -#define EQOS_MAC_VLAN_TR_ETV_SHIFT 16U -#define EQOS_MAC_PFR_HUC OSI_BIT(1) -#define EQOS_MAC_PFR_HMC OSI_BIT(2) -#define EQOS_MAC_MAX_HTR_REG_LEN 8U -#define EQOS_MAC_L3L4_CTR_L3HSBM0 (OSI_BIT(6) | OSI_BIT(7) | \ - OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10)) -#define EQOS_MAC_L3L4_CTR_L3HDBM0 (OSI_BIT(11) | OSI_BIT(12) | \ - OSI_BIT(13) | OSI_BIT(14) | \ - OSI_BIT(15)) #define EQOS_MAC_PFR_SHIFT 16 -#define EQOS_MTL_RXQ_OP_MODE_FEP OSI_BIT(4) -#define EQOS_MTL_OP_MODE_FRPE OSI_BIT(15) #define EQOS_MTL_OP_MODE_DTXSTS OSI_BIT(1) -#define EQOS_MAC_EXTR_PDC OSI_BIT(19) -#define EQOS_MTL_TXQ_DEBUG_TRCSTS 0x6U -#define EQOS_MTL_TXQ_DEBUG_TXQSTS OSI_BIT(4) -#define EQOS_MTL_RXQ_DEBUG_PRXQ 0x3FFF0000U -#define EQOS_MTL_RXQ_DEBUG_RXQSTS 0x30U #define EQOS_MAC_EXTR_DCRCC OSI_BIT(16) +#define EQOS_MTL_TXQ_ETS_SSCR_SSC_MASK 0x00003FFFU +#define EQOS_MTL_TXQ_ETS_QW_ISCQW_MASK 0x000FFFFFU +#define EQOS_MTL_TXQ_ETS_HCR_HC_MASK 0x1FFFFFFFU +#define EQOS_MTL_TXQ_ETS_LCR_LC_MASK 0x1FFFFFFFU +#define EQOS_MTL_TXQ_ETS_CR_AVALG OSI_BIT(2) +#define EQOS_MTL_TXQ_ETS_CR_AVALG_SHIFT 2U +#define EQOS_MTL_TXQ_ETS_CR_CC OSI_BIT(3) +#define EQOS_MTL_TXQ_ETS_CR_CC_SHIFT 3U +#define EQOS_MAC_EXTR_PDC OSI_BIT(19) #define EQOS_MAC_EXTR_EIPGEN OSI_BIT(24) #define EQOS_MAC_EXTR_EIPG_MASK 0x3E000000U #define EQOS_MAC_EXTR_EIPG_SHIFT 25U #define EQOS_MAC_EXTR_EIPG 0x3U #endif /* !OSI_STRIPPED_LIB */ -#define EQOS_MTL_RXQ_OP_MODE_FEP OSI_BIT(4) -#define EQOS_MAC_QX_TX_FLW_CTRL_TFE OSI_BIT(1) -#define EQOS_MAC_RX_FLW_CTRL_RFE OSI_BIT(0) -#define EQOS_MAC_PAUSE_TIME 0xFFFF0000U -#define EQOS_MAC_PAUSE_TIME_MASK 0xFFFF0000U +#define EQOS_MTL_TXQEN_MASK (OSI_BIT(3) | OSI_BIT(2)) +#define EQOS_MTL_TXQEN_MASK_SHIFT 2U +#define EQOS_MTL_OP_MODE_FRPE OSI_BIT(15) +#define EQOS_MAC_EXTR_PDC OSI_BIT(19) #define EQOS_MTL_RXQ_OP_MODE_EHFC OSI_BIT(7) #define EQOS_MTL_RXQ_OP_MODE_RFA_SHIFT 8U #define EQOS_MTL_RXQ_OP_MODE_RFA_MASK 0x00003F00U 
#define EQOS_MTL_RXQ_OP_MODE_RFD_SHIFT 14U #define EQOS_MTL_RXQ_OP_MODE_RFD_MASK 0x000FC000U -#define EQOS_MAC_PFR_PR OSI_BIT(0) -#define EQOS_MAC_PFR_DAIF OSI_BIT(3) -#define EQOS_MAC_PFR_PM OSI_BIT(4) -#define EQOS_MAC_PFR_DBF OSI_BIT(5) -#define EQOS_MAC_PFR_PCF (OSI_BIT(6) | OSI_BIT(7)) -#define EQOS_MAC_PFR_SAIF OSI_BIT(8) -#define EQOS_MAC_PFR_SAF OSI_BIT(9) -#define EQOS_MAC_PFR_HPF OSI_BIT(10) -#define EQOS_MAC_PFR_VTFE OSI_BIT(16) -#define EQOS_MAC_PFR_IPFE OSI_BIT(20) -#define EQOS_MAC_PFR_IPFE_SHIFT 20U -#define EQOS_MAC_PFR_DNTU OSI_BIT(21) -#define EQOS_MAC_PFR_RA OSI_BIT(31) +#ifndef OSI_STRIPPED_LIB #define EQOS_MAC_L4_SP_MASK 0x0000FFFFU #define EQOS_MAC_L4_DP_MASK 0xFFFF0000U #define EQOS_MAC_L4_DP_SHIFT 16 -#define EQOS_MAC_L3L4_CTR_L4SPM0 OSI_BIT(18) -#define EQOS_MAC_L3L4_CTR_L4SPIM0 OSI_BIT(19) -#define EQOS_MAC_L3L4_CTR_L4SPI_SHIFT 19 -#define EQOS_MAC_L3L4_CTR_L4DPM0 OSI_BIT(20) -#define EQOS_MAC_L3L4_CTR_L4DPIM0 OSI_BIT(21) -#define EQOS_MAC_L3L4_CTR_L4DPI_SHIFT 21 -#define EQOS_MAC_L3L4_CTR_L4PEN0 OSI_BIT(16) -#define EQOS_MAC_L3L4_CTR_L4PEN0_SHIFT 16 -#define EQOS_MAC_L3L4_CTR_L3PEN0 OSI_BIT(0) -#define EQOS_MAC_L3L4_CTR_L3SAM0 OSI_BIT(2) -#define EQOS_MAC_L3L4_CTR_L3SAIM0 OSI_BIT(3) -#define EQOS_MAC_L3L4_CTR_L3SAI_SHIFT 3 -#define EQOS_MAC_L3L4_CTR_L3DAM0 OSI_BIT(4) -#define EQOS_MAC_L3L4_CTR_L3DAIM0 OSI_BIT(5) -#define EQOS_MAC_L3L4_CTR_L3DAI_SHIFT 5 -#define EQOS_MAC_L3L4_CTR_DMCHEN0 OSI_BIT(28) -#define EQOS_MAC_L3L4_CTR_DMCHEN0_SHIFT 28 -#define EQOS_MAC_L3L4_CTR_DMCHN0 (OSI_BIT(24) | OSI_BIT(25) | \ - OSI_BIT(26) | OSI_BIT(27)) -#define EQOS_MAC_L3L4_CTR_DMCHN0_SHIFT 24 -#define EQOS_MAC_L3_IP6_CTRL_CLEAR (EQOS_MAC_L3L4_CTR_L3SAM0 | \ - EQOS_MAC_L3L4_CTR_L3SAIM0 | \ - EQOS_MAC_L3L4_CTR_L3DAM0 | \ - EQOS_MAC_L3L4_CTR_L3DAIM0 | \ - EQOS_MAC_L3L4_CTR_DMCHEN0 | \ - EQOS_MAC_L3L4_CTR_DMCHN0) -#define EQOS_MAC_L3_IP4_SA_CTRL_CLEAR (EQOS_MAC_L3L4_CTR_L3SAM0 | \ - EQOS_MAC_L3L4_CTR_L3SAIM0 | \ - EQOS_MAC_L3L4_CTR_DMCHEN0 | \ - 
EQOS_MAC_L3L4_CTR_DMCHN0) -#define EQOS_MAC_L3_IP4_DA_CTRL_CLEAR (EQOS_MAC_L3L4_CTR_L3DAM0 | \ - EQOS_MAC_L3L4_CTR_L3DAIM0 | \ - EQOS_MAC_L3L4_CTR_DMCHEN0 | \ - EQOS_MAC_L3L4_CTR_DMCHN0) -#define EQOS_MAC_L4_SP_CTRL_CLEAR (EQOS_MAC_L3L4_CTR_L4SPM0 | \ - EQOS_MAC_L3L4_CTR_L4SPIM0 | \ - EQOS_MAC_L3L4_CTR_DMCHEN0 | \ - EQOS_MAC_L3L4_CTR_DMCHN0) -#define EQOS_MAC_L4_DP_CTRL_CLEAR (EQOS_MAC_L3L4_CTR_L4DPM0 | \ - EQOS_MAC_L3L4_CTR_L4DPIM0 | \ - EQOS_MAC_L3L4_CTR_DMCHEN0 | \ - EQOS_MAC_L3L4_CTR_DMCHN0) -#define EQOS_MAC_L3L4_CTRL_ALL (EQOS_MAC_L3_IP6_CTRL_CLEAR | \ - EQOS_MAC_L3_IP4_SA_CTRL_CLEAR | \ - EQOS_MAC_L3_IP4_DA_CTRL_CLEAR | \ - EQOS_MAC_L4_SP_CTRL_CLEAR | \ - EQOS_MAC_L4_DP_CTRL_CLEAR) +#endif /* !OSI_STRIPPED_LIB */ #define EQOS_MAC_ADDRH_DCS (OSI_BIT(23) | OSI_BIT(22) | \ OSI_BIT(21) | OSI_BIT(20) | \ OSI_BIT(19) | OSI_BIT(18) | \ @@ -448,26 +467,8 @@ #define EQOS_MAC_ADDRH_AE OSI_BIT(31) #define EQOS_MAC_RQC2_PSRQ_MASK ((nveu32_t)0xFF) #define EQOS_MAC_RQC2_PSRQ_SHIFT 8U -#define EQOS_MAC_VLAN_TR_ETV_SHIFT 16U -#define EQOS_MAC_MAX_HTR_REG_LEN 8U -#define EQOS_MAC_TCR_TSENMACADDR OSI_BIT(18) -#define EQOS_MAC_TCR_SNAPTYPSEL_SHIFT 16U -#define EQOS_MAC_TCR_TSCTRLSSR OSI_BIT(9) -#define EQOS_MAC_TCR_TSADDREG OSI_BIT(5) -#define EQOS_MAC_TCR_TSINIT OSI_BIT(2) #define EQOS_MAC_TCR_TSUPDT OSI_BIT(3) -#define EQOS_MAC_TCR_TSCFUPDT OSI_BIT(1) -#define EQOS_MAC_PTO_CR_DN (OSI_BIT(15) | OSI_BIT(14) | \ - OSI_BIT(13) | OSI_BIT(12) | \ - OSI_BIT(11) | OSI_BIT(10) | \ - OSI_BIT(9) | OSI_BIT(8)) -#define EQOS_MAC_PTO_CR_DN_SHIFT 8U -#define EQOS_MAC_PTO_CR_APDREQEN OSI_BIT(2) -#define EQOS_MAC_PTO_CR_ASYNCEN OSI_BIT(1) -#define EQOS_MAC_PTO_CR_PTOEN OSI_BIT(0) -#define EQOS_MAC_PIDR_PID_MASK 0XFFFFU #define EQOS_MAC_STNSUR_ADDSUB_SHIFT 31U -#define EQOS_MAC_SSIR_SSINC_SHIFT 16U #define EQOS_MAC_GMIIDR_GD_WR_MASK 0xFFFF0000U #define EQOS_MAC_GMIIDR_GD_MASK 0xFFFFU #define EQOS_MDIO_PHY_ADDR_SHIFT 21U @@ -485,12 +486,6 @@ #define EQOS_MDIO_DATA_REG_DEV_ADDR_SHIFT 16U 
#define EQOS_DMA_CHAN_INTR_STATUS 0xFU -#define EQOS_DMA_CHX_STATUS_TPS OSI_BIT(1) -#define EQOS_DMA_CHX_STATUS_TBU OSI_BIT(2) -#define EQOS_DMA_CHX_STATUS_RBU OSI_BIT(7) -#define EQOS_DMA_CHX_STATUS_RPS OSI_BIT(8) -#define EQOS_DMA_CHX_STATUS_RWT OSI_BIT(9) -#define EQOS_DMA_CHX_STATUS_FBE OSI_BIT(10) #define EQOS_ASID_CTRL_SHIFT_24 24U #define EQOS_ASID_CTRL_SHIFT_16 16U @@ -511,21 +506,6 @@ (TEGRA_SID_EQOS_CH6) |\ (TEGRA_SID_EQOS_CH5) |\ (TEGRA_SID_EQOS)) -#define EQOS_5_30_SID 0x3U -#define EQOS_5_30_SID_CH3 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_24) -#define EQOS_5_30_SID_CH2 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_16) -#define EQOS_5_30_SID_CH1 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_8) -#define EQOS_5_30_ASID_CTRL_VAL ((EQOS_5_30_SID_CH3) |\ - (EQOS_5_30_SID_CH2) |\ - (EQOS_5_30_SID_CH1) |\ - (EQOS_5_30_SID)) -#define EQOS_5_30_SID_CH7 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_24) -#define EQOS_5_30_SID_CH6 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_16) -#define EQOS_5_30_SID_CH5 ((EQOS_5_30_SID) << EQOS_ASID_CTRL_SHIFT_8) -#define EQOS_5_30_ASID1_CTRL_VAL ((EQOS_5_30_SID_CH7) |\ - (EQOS_5_30_SID_CH6) |\ - (EQOS_5_30_SID_CH5) |\ - (EQOS_5_30_SID)) #define EQOS_MMC_INTR_DISABLE 0xFFFFFFFFU /* MAC FPE control/statusOSI_BITmap */ @@ -534,19 +514,8 @@ #define EQOS_MAC_FPE_CTS_TVER OSI_BIT(18) #define EQOS_MAC_FPE_CTS_RRSP OSI_BIT(17) #define EQOS_MAC_FPE_CTS_RVER OSI_BIT(16) -#define EQOS_MAC_FPE_CTS_SVER OSI_BIT(1) #define EQOS_MAC_FPE_CTS_SRSP OSI_BIT(2) -/* MTL_FPE_CTRL_STS */ -#define EQOS_MTL_FPE_CTS_PEC (OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10) | OSI_BIT(11) | \ - OSI_BIT(12) | OSI_BIT(13) | \ - OSI_BIT(14) | OSI_BIT(15)) -#define EQOS_MTL_FPE_CTS_PEC_SHIFT 8U -#define EQOS_MTL_FPE_CTS_PEC_MAX_SHIFT 16U -/* MTL FPE adv registers */ -#define EQOS_MTL_FPE_ADV_HADV_MASK (0xFFFFU) -#define EQOS_MTL_FPE_ADV_HADV_VAL 100U /* MTL_EST_CONTROL */ #define EQOS_MTL_EST_CONTROL_PTOV (OSI_BIT(24) | OSI_BIT(25) | \ OSI_BIT(26) | OSI_BIT(27) | \ @@ -563,19 +532,10 @@ 
#define EQOS_MTL_EST_CONTROL_CTOV_SHIFT 12U #define EQOS_MTL_EST_CTOV_RECOMMEND 94U #define EQOS_8PTP_CYCLE 40U -#ifdef MACSEC_SUPPORT -/* MACSEC Recommended value*/ -#define EQOS_MTL_EST_CTOV_MACSEC_RECOMMEND 758U -#endif /* MACSEC_SUPPORT */ -#define EQOS_MTL_EST_CONTROL_TILS (OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10)) #define EQOS_MTL_EST_CONTROL_LCSE (OSI_BIT(6) | OSI_BIT(5)) -#define EQOS_MTL_EST_CONTROL_LCSE_SHIFT 5U #define EQOS_MTL_EST_CONTROL_LCSE_VAL 0U #define EQOS_MTL_EST_CONTROL_DFBS OSI_BIT(5) #define EQOS_MTL_EST_CONTROL_DDBF OSI_BIT(4) -#define EQOS_MTL_EST_CONTROL_QHLBF OSI_BIT(3) -#define EQOS_MTL_EST_CONTROL_SSWL OSI_BIT(1) #define EQOS_MTL_EST_CONTROL_EEST OSI_BIT(0) #define EQOS_MTL_EST_OVERHEAD_OVHD (OSI_BIT(5) | OSI_BIT(4) | \ OSI_BIT(3) | OSI_BIT(2) | \ @@ -583,28 +543,18 @@ #define EQOS_MTL_EST_OVERHEAD_RECOMMEND 0x17U /* EST GCL controlOSI_BITmap */ #define EQOS_MTL_EST_ADDR_SHIFT 8U -#define EQOS_MTL_EST_ADDR_MASK (OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10) | OSI_BIT(11) | \ - OSI_BIT(12) | OSI_BIT(13) | \ - OSI_BIT(14) | OSI_BIT(15) | \ - OSI_BIT(16) | OSI_BIT(17) | \ - OSI_BIT(18) | OSI_BIT(19)) -#define EQOS_MTL_EST_SRWO OSI_BIT(0) -#define EQOS_MTL_EST_GCRR OSI_BIT(2) -#define EQOS_MTL_EST_ERR0 OSI_BIT(20) /* EST GCRA addresses */ -#define EQOS_MTL_EST_BTR_LOW ((unsigned int)0x0 << \ +#define EQOS_MTL_EST_BTR_LOW ((nveu32_t)0x0 << \ EQOS_MTL_EST_ADDR_SHIFT) -#define EQOS_MTL_EST_BTR_HIGH ((unsigned int)0x1 << \ +#define EQOS_MTL_EST_BTR_HIGH ((nveu32_t)0x1 << \ EQOS_MTL_EST_ADDR_SHIFT) -#define EQOS_MTL_EST_CTR_LOW ((unsigned int)0x2 << \ +#define EQOS_MTL_EST_CTR_LOW ((nveu32_t)0x2 << \ EQOS_MTL_EST_ADDR_SHIFT) -#define EQOS_MTL_EST_CTR_HIGH ((unsigned int)0x3 << \ +#define EQOS_MTL_EST_CTR_HIGH ((nveu32_t)0x3 << \ EQOS_MTL_EST_ADDR_SHIFT) -#define EQOS_MTL_EST_CTR_HIGH_MAX 0xFFU -#define EQOS_MTL_EST_TER ((unsigned int)0x4 << \ +#define EQOS_MTL_EST_TER ((nveu32_t)0x4 << \ EQOS_MTL_EST_ADDR_SHIFT) -#define EQOS_MTL_EST_LLR ((unsigned 
int)0x5 << \ +#define EQOS_MTL_EST_LLR ((nveu32_t)0x5 << \ EQOS_MTL_EST_ADDR_SHIFT) /*EST MTL interrupt STATUS and ERR*/ #define EQOS_MTL_IS_ESTIS OSI_BIT(18) @@ -614,295 +564,18 @@ #define EQOS_MTL_EST_STATUS_HLBF OSI_BIT(2) #define EQOS_MTL_EST_STATUS_BTRE OSI_BIT(1) #define EQOS_MTL_EST_STATUS_SWLC OSI_BIT(0) -#define EQOS_MTL_EST_ITRE_CGCE OSI_BIT(4) -#define EQOS_MTL_EST_ITRE_IEHS OSI_BIT(3) -#define EQOS_MTL_EST_ITRE_IEHF OSI_BIT(2) -#define EQOS_MTL_EST_ITRE_IEBE OSI_BIT(1) -#define EQOS_MTL_EST_ITRE_IECC OSI_BIT(0) +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) +/* MACSEC Recommended value*/ +#define EQOS_MTL_EST_CTOV_MACSEC_RECOMMEND 758U +#endif /* MACSEC_SUPPORT */ +#ifdef UPDATED_PAD_CAL /* EQOS RGMII Rx padctrl registers E_INPUT bit */ -#define EQOS_PADCTL_EQOS_E_INPUT OSI_BIT(6) - +#define EQOS_PADCTL_EQOS_E_INPUT OSI_BIT(6) +#endif /** @} */ void update_ehfc_rfa_rfd(nveu32_t rx_fifo, nveu32_t *value); -/** - * @addtogroup EQOS-Safety-Register EQOS Safety Register Mask - * - * @brief EQOS HW register masks and index - * @{ - */ -#define EQOS_MAC_MCR_MASK 0xFFFFFF7FU -#define EQOS_MAC_PFR_MASK 0x803107FFU -#define EQOS_MAC_HTR_MASK 0xFFFFFFFFU -#define EQOS_MAC_QX_TXFC_MASK 0xFFFF00F2U -#define EQOS_MAC_RQC0R_MASK 0xFFU -#define EQOS_MAC_RQC1R_MASK 0xF77077U -#define EQOS_MAC_RQC2R_MASK 0xFFFFFFFFU -#define EQOS_MAC_IMR_MASK 0x67039U -#define EQOS_MAC_MA0HR_MASK 0xFFFFFU -#define EQOS_MAC_MA0LR_MASK 0xFFFFFFFFU -#define EQOS_MAC_TCR_MASK 0x1107FF03U -#define EQOS_MAC_SSIR_MASK 0xFFFF00U -#define EQOS_MAC_TAR_MASK 0xFFFFFFFFU -#define EQOS_RXQ_DMA_MAP0_MASK 0x13131313U -#define EQOS_RXQ_EN_MASK (OSI_BIT(0) | OSI_BIT(1)) -#define EQOS_MTL_TXQ_OP_MODE_MASK 0xFF007EU -#define EQOS_MTL_TXQ_QW_MASK 0x1FFFFFU -#define EQOS_MTL_RXQ_OP_MODE_MASK 0xFFFFFFBU -#define EQOS_PAD_AUTO_CAL_CFG_MASK 0x7FFFFFFFU -#define EQOS_DMA_SBUS_MASK 0xDF1F3CFFU - -/* To add new registers to validate,append at end of this list and increment - * 
EQOS_MAX_CORE_SAFETY_REGS. - * Using macro instead of enum due to misra error. - */ -#define EQOS_MAC_MCR_IDX 0U -#define EQOS_MAC_PFR_IDX 1U -#define EQOS_MAC_HTR0_IDX 2U -#define EQOS_MAC_HTR1_IDX 3U -#define EQOS_MAC_HTR2_IDX 4U -#define EQOS_MAC_HTR3_IDX 5U -#define EQOS_MAC_Q0_TXFC_IDX 6U -#define EQOS_MAC_RQC0R_IDX 7U -#define EQOS_MAC_RQC1R_IDX 8U -#define EQOS_MAC_RQC2R_IDX 9U -#define EQOS_MAC_IMR_IDX 10U -#define EQOS_MAC_MA0HR_IDX 11U -#define EQOS_MAC_MA0LR_IDX 12U -#define EQOS_MAC_TCR_IDX 13U -#define EQOS_MAC_SSIR_IDX 14U -#define EQOS_MAC_TAR_IDX 15U -#define EQOS_PAD_AUTO_CAL_CFG_IDX 16U -#define EQOS_MTL_RXQ_DMA_MAP0_IDX 17U -#define EQOS_MTL_CH0_TX_OP_MODE_IDX 18U -#define EQOS_MTL_CH1_TX_OP_MODE_IDX 19U -#define EQOS_MTL_CH2_TX_OP_MODE_IDX 20U -#define EQOS_MTL_CH3_TX_OP_MODE_IDX 21U -#define EQOS_MTL_CH4_TX_OP_MODE_IDX 22U -#define EQOS_MTL_CH5_TX_OP_MODE_IDX 23U -#define EQOS_MTL_CH6_TX_OP_MODE_IDX 24U -#define EQOS_MTL_CH7_TX_OP_MODE_IDX 25U -#define EQOS_MTL_TXQ0_QW_IDX 26U -#define EQOS_MTL_TXQ1_QW_IDX 27U -#define EQOS_MTL_TXQ2_QW_IDX 28U -#define EQOS_MTL_TXQ3_QW_IDX 29U -#define EQOS_MTL_TXQ4_QW_IDX 30U -#define EQOS_MTL_TXQ5_QW_IDX 31U -#define EQOS_MTL_TXQ6_QW_IDX 32U -#define EQOS_MTL_TXQ7_QW_IDX 33U -#define EQOS_MTL_CH0_RX_OP_MODE_IDX 34U -#define EQOS_MTL_CH1_RX_OP_MODE_IDX 35U -#define EQOS_MTL_CH2_RX_OP_MODE_IDX 36U -#define EQOS_MTL_CH3_RX_OP_MODE_IDX 37U -#define EQOS_MTL_CH4_RX_OP_MODE_IDX 38U -#define EQOS_MTL_CH5_RX_OP_MODE_IDX 39U -#define EQOS_MTL_CH6_RX_OP_MODE_IDX 40U -#define EQOS_MTL_CH7_RX_OP_MODE_IDX 41U -#define EQOS_MTL_CH8_RX_OP_MODE_IDX 42U -#define EQOS_DMA_SBUS_IDX 43U -#define EQOS_MTL_RXQ_DMA_MAP1_IDX 44U -#define EQOS_MAX_CORE_SAFETY_REGS 45U -/** @} */ - -/** - * @addtogroup EQOS-MTL FRP Indirect Access register defines - * - * @brief EQOS MTL register offsets - * @{ - */ -#define EQOS_MTL_FRP_READ_UDELAY 1U -#define EQOS_MTL_FRP_READ_RETRY 10000U - -/* FRP Control and Status register defines */ -#define 
EQOS_MTL_RXP_CS_RXPI OSI_BIT(31) -#define EQOS_MTL_RXP_CS_NPE (OSI_BIT(23) | OSI_BIT(22) | \ - OSI_BIT(21) | OSI_BIT(20) | \ - OSI_BIT(19) | OSI_BIT(18) | \ - OSI_BIT(17) | OSI_BIT(16)) -#define EQOS_MTL_RXP_CS_NPE_SHIFT 16U -#define EQOS_MTL_RXP_CS_NVE (OSI_BIT(7) | OSI_BIT(6) | \ - OSI_BIT(5) | OSI_BIT(4) | \ - OSI_BIT(3) | OSI_BIT(2) | \ - OSI_BIT(1) | OSI_BIT(0)) -/* FRP Interrupt Control and Status register */ -#define EQOS_MTL_RXP_INTR_CS_PDRFIE OSI_BIT(19) -#define EQOS_MTL_RXP_INTR_CS_FOOVIE OSI_BIT(18) -#define EQOS_MTL_RXP_INTR_CS_NPEOVIE OSI_BIT(17) -#define EQOS_MTL_RXP_INTR_CS_NVEOVIE OSI_BIT(16) -#define EQOS_MTL_RXP_INTR_CS_PDRFIS OSI_BIT(3) -#define EQOS_MTL_RXP_INTR_CS_FOOVIS OSI_BIT(2) -#define EQOS_MTL_RXP_INTR_CS_NPEOVIS OSI_BIT(1) -#define EQOS_MTL_RXP_INTR_CS_NVEOVIS OSI_BIT(0) -/* Indirect Instruction Table defines */ -#define EQOS_MTL_FRP_IE0(x) ((x) * 0x4U + 0x0U) -#define EQOS_MTL_FRP_IE1(x) ((x) * 0x4U + 0x1U) -#define EQOS_MTL_FRP_IE2(x) ((x) * 0x4U + 0x2U) -#define EQOS_MTL_FRP_IE3(x) ((x) * 0x4U + 0x3U) -#define EQOS_MTL_FRP_IE2_DCH (OSI_BIT(31) | OSI_BIT(30) | \ - OSI_BIT(29) | OSI_BIT(28) | \ - OSI_BIT(27) | OSI_BIT(26) | \ - OSI_BIT(25) | OSI_BIT(24)) -#define EQOS_MTL_FRP_IE2_DCH_SHIFT 24U -#define EQOS_MTL_FRP_IE2_DCH_MASK 0xFFU -#define EQOS_MTL_FRP_IE2_OKI (OSI_BIT(23) | OSI_BIT(22) | \ - OSI_BIT(21) | OSI_BIT(20) | \ - OSI_BIT(19) | OSI_BIT(18) | \ - OSI_BIT(17) | OSI_BIT(16)) -#define EQOS_MTL_FRP_IE2_OKI_SHIFT 16U -#define EQOS_MTL_FRP_IE2_FO (OSI_BIT(13) | OSI_BIT(12) | \ - OSI_BIT(11) | OSI_BIT(10) | \ - OSI_BIT(9) | OSI_BIT(8)) -#define EQOS_MTL_FRP_IE2_FO_SHIFT 8U -#define EQOS_MTL_FRP_IE2_NC OSI_BIT(3) -#define EQOS_MTL_FRP_IE2_IM OSI_BIT(2) -#define EQOS_MTL_FRP_IE2_RF OSI_BIT(1) -#define EQOS_MTL_FRP_IE2_AF OSI_BIT(0) -/* Indirect register defines */ -#define EQOS_MTL_RXP_IND_CS_BUSY OSI_BIT(31) -#define EQOS_MTL_RXP_IND_CS_RXPEIEC (OSI_BIT(22) | OSI_BIT(21)) -#define EQOS_MTL_RXP_IND_CS_RXPEIEE OSI_BIT(20) -#define 
EQOS_MTL_RXP_IND_CS_WRRDN OSI_BIT(16) -#define EQOS_MTL_RXP_IND_CS_ADDR (OSI_BIT(9) | OSI_BIT(8) | \ - OSI_BIT(7) | OSI_BIT(6) | \ - OSI_BIT(5) | OSI_BIT(4) | \ - OSI_BIT(3) | OSI_BIT(2) | \ - OSI_BIT(1) | OSI_BIT(0)) -/** @} */ - -/** - * @brief core_func_safety - Struct used to store last written values of - * critical core HW registers. - */ -struct core_func_safety { - /** Array of reg MMIO addresses (base of EQoS + offset of reg) */ - void *reg_addr[EQOS_MAX_CORE_SAFETY_REGS]; - /** Array of bit-mask value of each corresponding reg - * (used to ignore self-clearing/reserved bits in reg) */ - nveu32_t reg_mask[EQOS_MAX_CORE_SAFETY_REGS]; - /** Array of value stored in each corresponding register */ - nveu32_t reg_val[EQOS_MAX_CORE_SAFETY_REGS]; - /** OSI lock variable used to protect writes to reg while - * validation is in-progress */ - nveu32_t core_safety_lock; -}; - -/** - * @addtogroup EQOS_HW EQOS HW BACKUP registers - * - * @brief Definitions related to taking backup of EQOS core registers. - * @{ - */ - -/* Hardware Register offsets to be backed up during suspend. - * - * Do not change the order of these macros. To add new registers to be - * backed up, append to end of list before EQOS_MAX_MAC_BAK_IDX, and - * update EQOS_MAX_MAC_BAK_IDX based on new macro. 
- */ -#define EQOS_MAC_MCR_BAK_IDX 0U -#define EQOS_MAC_EXTR_BAK_IDX ((EQOS_MAC_MCR_BAK_IDX + 1U)) -#define EQOS_MAC_PFR_BAK_IDX ((EQOS_MAC_EXTR_BAK_IDX + 1U)) -#define EQOS_MAC_VLAN_TAG_BAK_IDX ((EQOS_MAC_PFR_BAK_IDX + 1U)) -#define EQOS_MAC_VLANTIR_BAK_IDX ((EQOS_MAC_VLAN_TAG_BAK_IDX + 1U)) -#define EQOS_MAC_RX_FLW_CTRL_BAK_IDX ((EQOS_MAC_VLANTIR_BAK_IDX + 1U)) -#define EQOS_MAC_RQC0R_BAK_IDX ((EQOS_MAC_RX_FLW_CTRL_BAK_IDX + 1U)) -#define EQOS_MAC_RQC1R_BAK_IDX ((EQOS_MAC_RQC0R_BAK_IDX + 1U)) -#define EQOS_MAC_RQC2R_BAK_IDX ((EQOS_MAC_RQC1R_BAK_IDX + 1U)) -#define EQOS_MAC_ISR_BAK_IDX ((EQOS_MAC_RQC2R_BAK_IDX + 1U)) -#define EQOS_MAC_IMR_BAK_IDX ((EQOS_MAC_ISR_BAK_IDX + 1U)) -#define EQOS_MAC_PMTCSR_BAK_IDX ((EQOS_MAC_IMR_BAK_IDX + 1U)) -#define EQOS_MAC_LPI_CSR_BAK_IDX ((EQOS_MAC_PMTCSR_BAK_IDX + 1U)) -#define EQOS_MAC_LPI_TIMER_CTRL_BAK_IDX ((EQOS_MAC_LPI_CSR_BAK_IDX + 1U)) -#define EQOS_MAC_LPI_EN_TIMER_BAK_IDX ((EQOS_MAC_LPI_TIMER_CTRL_BAK_IDX + 1U)) -#define EQOS_MAC_ANS_BAK_IDX ((EQOS_MAC_LPI_EN_TIMER_BAK_IDX + 1U)) -#define EQOS_MAC_PCS_BAK_IDX ((EQOS_MAC_ANS_BAK_IDX + 1U)) -#define EQOS_5_00_MAC_ARPPA_BAK_IDX ((EQOS_MAC_PCS_BAK_IDX + 1U)) -#define EQOS_MMC_CNTRL_BAK_IDX ((EQOS_5_00_MAC_ARPPA_BAK_IDX + 1U)) -#define EQOS_4_10_MAC_ARPPA_BAK_IDX ((EQOS_MMC_CNTRL_BAK_IDX + 1U)) -#define EQOS_MAC_TCR_BAK_IDX ((EQOS_4_10_MAC_ARPPA_BAK_IDX + 1U)) -#define EQOS_MAC_SSIR_BAK_IDX ((EQOS_MAC_TCR_BAK_IDX + 1U)) -#define EQOS_MAC_STSR_BAK_IDX ((EQOS_MAC_SSIR_BAK_IDX + 1U)) -#define EQOS_MAC_STNSR_BAK_IDX ((EQOS_MAC_STSR_BAK_IDX + 1U)) -#define EQOS_MAC_STSUR_BAK_IDX ((EQOS_MAC_STNSR_BAK_IDX + 1U)) -#define EQOS_MAC_STNSUR_BAK_IDX ((EQOS_MAC_STSUR_BAK_IDX + 1U)) -#define EQOS_MAC_TAR_BAK_IDX ((EQOS_MAC_STNSUR_BAK_IDX + 1U)) -#define EQOS_DMA_BMR_BAK_IDX ((EQOS_MAC_TAR_BAK_IDX + 1U)) -#define EQOS_DMA_SBUS_BAK_IDX ((EQOS_DMA_BMR_BAK_IDX + 1U)) -#define EQOS_DMA_ISR_BAK_IDX ((EQOS_DMA_SBUS_BAK_IDX + 1U)) -#define EQOS_MTL_OP_MODE_BAK_IDX ((EQOS_DMA_ISR_BAK_IDX + 1U)) 
-#define EQOS_MTL_RXQ_DMA_MAP0_BAK_IDX ((EQOS_MTL_OP_MODE_BAK_IDX + 1U)) -/* x varies from 0-7, 8 HTR registers total */ -#define EQOS_MAC_HTR_REG_BAK_IDX(x) ((EQOS_MTL_RXQ_DMA_MAP0_BAK_IDX + 1U + \ - (x))) -/* x varies from 0-7, 8 queues total */ -#define EQOS_MAC_QX_TX_FLW_CTRL_BAK_IDX(x) ((EQOS_MAC_HTR_REG_BAK_IDX(0U) \ - + EQOS_MAX_HTR_REGS + (x))) -/* x varies from 0-127, 128 L2 DA/SA filters total */ -#define EQOS_MAC_ADDRH_BAK_IDX(x) ((EQOS_MAC_QX_TX_FLW_CTRL_BAK_IDX(0U) \ - + OSI_EQOS_MAX_NUM_QUEUES + (x))) -#define EQOS_MAC_ADDRL_BAK_IDX(x) ((EQOS_MAC_ADDRH_BAK_IDX(0U) + \ - EQOS_MAX_MAC_ADDRESS_FILTER + (x))) -/* x varies from 0-7, 8 L3/L4 filters total */ -#define EQOS_MAC_L3L4_CTR_BAK_IDX(x) ((EQOS_MAC_ADDRL_BAK_IDX(0U) + \ - EQOS_MAX_MAC_ADDRESS_FILTER + (x))) -#define EQOS_MAC_L4_ADR_BAK_IDX(x) ((EQOS_MAC_L3L4_CTR_BAK_IDX(0U) + \ - EQOS_MAX_L3_L4_FILTER + (x))) -#define EQOS_MAC_L3_AD0R_BAK_IDX(x) ((EQOS_MAC_L4_ADR_BAK_IDX(0U) + \ - EQOS_MAX_L3_L4_FILTER + (x))) -#define EQOS_MAC_L3_AD1R_BAK_IDX(x) ((EQOS_MAC_L3_AD0R_BAK_IDX(0U) + \ - EQOS_MAX_L3_L4_FILTER + (x))) -#define EQOS_MAC_L3_AD2R_BAK_IDX(x) ((EQOS_MAC_L3_AD1R_BAK_IDX(0U) + \ - EQOS_MAX_L3_L4_FILTER + (x))) -#define EQOS_MAC_L3_AD3R_BAK_IDX(x) ((EQOS_MAC_L3_AD2R_BAK_IDX(0U) + \ - EQOS_MAX_L3_L4_FILTER + (x))) - -/* MTL HW Register offsets - * - * Do not change the order of these macros. To add new registers to be - * backed up, append to end of list before EQOS_MAX_MTL_BAK_IDX, and - * update EQOS_MAX_MTL_BAK_IDX based on new macro. 
- */ -/* x varies from 0-7, 8 queues total */ -#define EQOS_MTL_CHX_TX_OP_MODE_BAK_IDX(x) ((EQOS_MAC_L3_AD3R_BAK_IDX(0U) \ - + EQOS_MAX_L3_L4_FILTER + (x))) -#define EQOS_MTL_TXQ_ETS_CR_BAK_IDX(x) ((EQOS_MTL_CHX_TX_OP_MODE_BAK_IDX(0U) \ - + OSI_EQOS_MAX_NUM_QUEUES + (x))) -#define EQOS_MTL_TXQ_QW_BAK_IDX(x) ((EQOS_MTL_TXQ_ETS_CR_BAK_IDX(0U) + \ - OSI_EQOS_MAX_NUM_QUEUES + (x))) -#define EQOS_MTL_TXQ_ETS_SSCR_BAK_IDX(x) ((EQOS_MTL_TXQ_QW_BAK_IDX(0U) \ - + OSI_EQOS_MAX_NUM_QUEUES + \ - (x))) -#define EQOS_MTL_TXQ_ETS_HCR_BAK_IDX(x) ((EQOS_MTL_TXQ_ETS_SSCR_BAK_IDX(0U) + \ - OSI_EQOS_MAX_NUM_QUEUES + (x))) -#define EQOS_MTL_TXQ_ETS_LCR_BAK_IDX(x) ((EQOS_MTL_TXQ_ETS_HCR_BAK_IDX(0U) + \ - OSI_EQOS_MAX_NUM_QUEUES + (x))) -#define EQOS_MTL_CHX_RX_OP_MODE_BAK_IDX(x) \ - ((EQOS_MTL_TXQ_ETS_LCR_BAK_IDX(0U) + \ - OSI_EQOS_MAX_NUM_QUEUES + (x))) - -/* EQOS Wrapper register offsets to be saved during suspend - * - * Do not change the order of these macros. To add new registers to be - * backed up, append to end of list before EQOS_MAX_WRAPPER_BAK_IDX, - * and update EQOS_MAX_WRAPPER_BAK_IDX based on new macro. - */ -#define EQOS_CLOCK_CTRL_0_BAK_IDX ((EQOS_MTL_CHX_RX_OP_MODE_BAK_IDX(0U) \ - + OSI_EQOS_MAX_NUM_QUEUES)) -#define EQOS_AXI_ASID_CTRL_BAK_IDX ((EQOS_CLOCK_CTRL_0_BAK_IDX + 1U)) -#define EQOS_PAD_CRTL_BAK_IDX ((EQOS_AXI_ASID_CTRL_BAK_IDX + 1U)) -#define EQOS_PAD_AUTO_CAL_CFG_BAK_IDX ((EQOS_PAD_CRTL_BAK_IDX + 1U)) -/* EQOS_PAD_AUTO_CAL_STAT is Read-only. Skip backup/restore */ - -/* To add new registers to backup during suspend, and restore during resume - * add it before this line, and increment EQOS_MAC_BAK_IDX accordingly. 
- */ - -#ifndef OSI_STRIPPED_LIB -#define EQOS_MAX_BAK_IDX ((EQOS_PAD_AUTO_CAL_CFG_BAK_IDX + 1U)) -#endif /* !OSI_STRIPPED_LIB */ -/** @} */ - /** * @addtogroup EQOS-MAC-Feature EQOS MAC HW feature registers bit fields * @@ -1041,9 +714,6 @@ struct core_func_safety { #define EQOS_MAC_HFR3_DVLAN_MASK 0x1U #define EQOS_MAC_HFR3_DVLAN_SHIFT 5U -#define EQOS_MAC_HFR3_PDUPSEL_MASK 0x1U -#define EQOS_MAC_HFR3_PDUPSEL_SHIFT 9U - #define EQOS_MAC_HFR3_FRPSEL_MASK 0x1U #define EQOS_MAC_HFR3_FRPSEL_SHIFT 10U @@ -1116,12 +786,16 @@ struct core_func_safety { #define EQOS_TMR_SHIFT 0U #define EQOS_TMR_MASK 0x3FFU #define EQOS_MAC_FSM_CONTROL 0x148U -#define EQOS_TMOUTEN OSI_BIT(0) #define EQOS_PRTYEN OSI_BIT(1) #define EQOS_MAC_DPP_FSM_INTERRUPT_STATUS 0x140U #define EQOS_MTL_DPP_CONTROL 0xCE0U #define EQOS_EDPP OSI_BIT(0) #define EQOS_MAC_DPP_FSM_INTERRUPT_STATUS 0x140U +#define EQOS_MTL_DBG_CTL 0xC08U +#define EQOS_MTL_DBG_CTL_EIEC OSI_BIT(18) +#define EQOS_MTL_DBG_CTL_EIEE OSI_BIT(16) +#define EQOS_MTL_DPP_ECC_EIC 0xCE4U +#define EQOS_MTL_DPP_ECC_EIC_BLEI OSI_BIT(0) /** @} */ #endif diff --git a/kernel/nvethernetrm/osi/core/eqos_mmc.c b/kernel/nvethernetrm/osi/core/eqos_mmc.c index e0de057a46..ab4f78cb90 100644 --- a/kernel/nvethernetrm/osi/core/eqos_mmc.c +++ b/kernel/nvethernetrm/osi/core/eqos_mmc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -54,7 +54,7 @@ static inline nveu64_t update_mmc_val(struct osi_core_priv_data *const osi_core, nveu64_t last_value, nveu64_t offset) { - nveu64_t temp; + nveu64_t temp = 0; nveu32_t value = osi_readla(osi_core, (nveu8_t *)osi_core->base + offset); @@ -65,11 +65,9 @@ static inline nveu64_t update_mmc_val(struct osi_core_priv_data *const osi_core, "Value overflow resetting all counters\n", (nveul64_t)offset); eqos_reset_mmc(osi_core); - } else { - return temp; } - return 0; + return temp; } /** diff --git a/kernel/nvethernetrm/osi/core/frp.c b/kernel/nvethernetrm/osi/core/frp.c index 4b0c9535cc..2dea1835f9 100644 --- a/kernel/nvethernetrm/osi/core/frp.c +++ b/kernel/nvethernetrm/osi/core/frp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -33,7 +33,7 @@ * */ static void frp_entry_copy(struct osi_core_frp_entry *dst, - struct osi_core_frp_entry *src) + struct osi_core_frp_entry *const src) { dst->frp_id = src->frp_id; dst->data.match_data = src->data.match_data; @@ -61,13 +61,14 @@ static void frp_entry_copy(struct osi_core_frp_entry *dst, * @retval 0 on success. * @retval -1 on failure. 
*/ -static int frp_entry_find(struct osi_core_priv_data *const osi_core, - int frp_id, - unsigned char *start, - unsigned char *no_entries) +static nve32_t frp_entry_find(struct osi_core_priv_data *const osi_core, + nve32_t frp_id, + nveu8_t *start, + nveu8_t *no_entries) { - unsigned char count = OSI_NONE, found = OSI_NONE; + nveu8_t count = OSI_NONE, found = OSI_NONE; struct osi_core_frp_entry *entry = OSI_NULL; + nve32_t ret = 0; /* Parse the FRP table for give frp_id */ for (count = 0U; count < osi_core->frp_cnt; count++) { @@ -80,17 +81,17 @@ static int frp_entry_find(struct osi_core_priv_data *const osi_core, found = OSI_ENABLE; } else { /* Increment entries */ - *no_entries = (unsigned char) (*no_entries + 1U); + *no_entries = (nveu8_t)(*no_entries + 1U); } } } if (found == OSI_NONE) { /* No entry found return error */ - return -1; + ret = -1; } - return 0; + return ret; } /** @@ -104,34 +105,38 @@ static int frp_entry_find(struct osi_core_priv_data *const osi_core, * * @retval No of FRP entries required. 
*/ -static unsigned char frp_req_entries(unsigned char offset, - unsigned char match_length) +static nveu8_t frp_req_entries(nveu8_t offset, + nveu8_t match_length) { - unsigned char req = 0U; + nveu8_t req = 0U; + nveu8_t temp_match_length = match_length; - /* Validate for match_length */ - if ((match_length == OSI_NONE) || - (match_length > OSI_FRP_MATCH_DATA_MAX)) { + /* Validate for temp_match_length */ + if ((temp_match_length == OSI_NONE) || + (temp_match_length > OSI_FRP_MATCH_DATA_MAX)) { /* return zero */ - return req; + goto done; } /* Check does the given length can fit in fist entry */ - if (match_length <= (unsigned char) FRP_OFFSET_BYTES(offset)) { + if (temp_match_length <= (nveu8_t)FRP_OFFSET_BYTES(offset)) { /* Require one entry */ - return 1U; + req = 1U; + goto done; } /* Initialize req as 1U and decrement length by FRP_OFFSET_BYTES */ req = 1U; - match_length = (unsigned char) (match_length - (unsigned char) FRP_OFFSET_BYTES(offset)); - if ((match_length / FRP_MD_SIZE) < OSI_FRP_MATCH_DATA_MAX) { - req = (unsigned char) (req + (match_length / FRP_MD_SIZE)); - if ((match_length % FRP_MD_SIZE) != OSI_NONE) { + temp_match_length = (nveu8_t)(temp_match_length - + (nveu8_t)FRP_OFFSET_BYTES(offset)); + if ((temp_match_length / FRP_MD_SIZE) < OSI_FRP_MATCH_DATA_MAX) { + req = (nveu8_t)(req + (temp_match_length / FRP_MD_SIZE)); + if ((temp_match_length % FRP_MD_SIZE) != OSI_NONE) { /* Need one more entry */ - req = (unsigned char) (req + 1U); + req = (nveu8_t)(req + 1U); } } +done: return req; } @@ -144,7 +149,7 @@ static unsigned char frp_req_entries(unsigned char offset, * @param[in] data: FRP entry data pointer. 
* */ -static void frp_entry_mode_parse(unsigned char filter_mode, +static void frp_entry_mode_parse(nveu8_t filter_mode, struct osi_core_frp_data *data) { switch (filter_mode) { @@ -189,7 +194,7 @@ static void frp_entry_mode_parse(unsigned char filter_mode, data->inverse_match = OSI_DISABLE; break; default: - //OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + //OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, // "Invalid filter mode argment\n", // filter_mode); break; @@ -205,6 +210,7 @@ static void frp_entry_mode_parse(unsigned char filter_mode, * * @param[in] osi_core: OSI core private data structure. * @param[in] frp_id: FRP ID to add. + * @param[in] pos: FRP entry position. * @param[in] match: Pointer to match data. * @param[in] length: Match data length. * @param[in] offset: Actual match data offset position. @@ -215,30 +221,34 @@ static void frp_entry_mode_parse(unsigned char filter_mode, * @retval 0 on success. * @retval -1 on failure. */ -static int frp_entry_add(struct osi_core_priv_data *const osi_core, - int frp_id, - unsigned char pos, - unsigned char *const match, - unsigned char length, - unsigned char offset, - unsigned char filter_mode, - int next_frp_id, - unsigned int dma_sel) +static nve32_t frp_entry_add(struct osi_core_priv_data *const osi_core, + nve32_t frp_id, + nveu8_t pos, + nveu8_t *const match, + nveu8_t length, + nveu8_t offset, + nveu8_t filter_mode, + nve32_t next_frp_id, + nveu32_t dma_sel) { struct osi_core_frp_entry *entry = OSI_NULL; struct osi_core_frp_data *data = OSI_NULL; - unsigned int req_entries = 0U; - unsigned char ok_index = 0U; - unsigned char fo_t = 0U; - unsigned char fp_t = 0U; - unsigned char i = 0U, j = 0U, md_pos = 0U; + nveu32_t req_entries = 0U; + nveu8_t ok_index = 0U; + nveu8_t fo_t = 0U; + nveu8_t fp_t = 0U; + nveu8_t i = 0U, j = 0U, md_pos = 0U; + nveu8_t temp_pos = pos; + nve32_t ret; + nveu32_t dma_sel_val[MAX_MAC_IP_TYPES] = {0xFFU, 0x3FF}; /* Validate length */ if (length > OSI_FRP_MATCH_DATA_MAX) { 
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, "Invalid match length\n", length); - return -1; + ret = -1; + goto done; } /* Validate filter_mode */ @@ -246,7 +256,8 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid filter mode argment\n", filter_mode); - return -1; + ret = -1; + goto done; } /* Validate offset */ @@ -254,27 +265,38 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid offset value\n", offset); - return -1; + ret = -1; + goto done; + } + + /* Validate channel selection */ + if (dma_sel > dma_sel_val[osi_core->mac]) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid DMA selection\n", + (nveu64_t)dma_sel); + ret = -1; + goto done; } /* Check for avilable space */ req_entries = frp_req_entries(offset, length); if ((req_entries >= OSI_FRP_MAX_ENTRY) || - (req_entries + pos) >= OSI_FRP_MAX_ENTRY) { + ((req_entries + temp_pos) >= OSI_FRP_MAX_ENTRY)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "No space to update FRP ID\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Validate next_frp_id index ok_index */ - if (filter_mode == OSI_FRP_MODE_LINK || - filter_mode == OSI_FRP_MODE_IM_LINK) { + if ((filter_mode == OSI_FRP_MODE_LINK) || + (filter_mode == OSI_FRP_MODE_IM_LINK)) { if (frp_entry_find(osi_core, next_frp_id, &i, &j) < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "No Link FRP ID index found\n", OSI_NONE); - i = (unsigned char) next_frp_id; + i = (nveu8_t)next_frp_id; } ok_index = i; } @@ -285,7 +307,7 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core, md_pos = 0U; for (i = 0U; i < req_entries; i++) { /* Get FRP entry*/ - entry = &osi_core->frp_table[pos]; + entry = &osi_core->frp_table[temp_pos]; data = &entry->data; /* Fill FRP ID */ @@ -295,9 +317,9 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core, data->match_data 
= OSI_NONE; data->match_en = OSI_NONE; for (j = fp_t; j < FRP_MD_SIZE; j++) { - data->match_data |= ((unsigned int)match[md_pos]) + data->match_data |= ((nveu32_t)match[md_pos]) << (j * FRP_ME_BYTE_SHIFT); - data->match_en |= ((unsigned int)FRP_ME_BYTE << + data->match_en |= ((nveu32_t)FRP_ME_BYTE << (j * FRP_ME_BYTE_SHIFT)); md_pos++; if (md_pos >= length) { @@ -323,10 +345,10 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core, data->next_ins_ctrl = OSI_ENABLE; /* Init next FRP entry */ - pos++; + temp_pos++; fo_t++; fp_t = OSI_NONE; - data->ok_index = pos; + data->ok_index = temp_pos; } else { data->next_ins_ctrl = OSI_DISABLE; data->ok_index = OSI_DISABLE; @@ -334,14 +356,16 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core, } /* Check and fill final OKI */ - if (filter_mode == OSI_FRP_MODE_LINK || - filter_mode == OSI_FRP_MODE_IM_LINK) { + if ((filter_mode == OSI_FRP_MODE_LINK) || + (filter_mode == OSI_FRP_MODE_IM_LINK)) { /* Update NIC and OKI in final entry */ data->next_ins_ctrl = OSI_ENABLE; data->ok_index = ok_index; } - return 0; + ret = 0; +done: + return ret; } /** @@ -350,16 +374,19 @@ static int frp_entry_add(struct osi_core_priv_data *const osi_core, * Algorithm: Update FRP table into HW. * * @param[in] osi_core: OSI core private data structure. + * @param[in] ops_p: Core operations data structure. * * @retval 0 on success. * @retval -1 on failure. 
*/ -static int frp_hw_write(struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p) +nve32_t frp_hw_write(struct osi_core_priv_data *const osi_core, + struct core_ops *const ops_p) { - int ret = -1, tmp = -1; + nve32_t ret = 0; + nve32_t tmp = 0; struct osi_core_frp_entry *entry; - unsigned int frp_cnt = osi_core->frp_cnt, i = OSI_NONE; + struct osi_core_frp_data bypass_entry = {}; + nveu32_t frp_cnt = osi_core->frp_cnt, i = OSI_NONE; /* Disable the FRP in HW */ ret = ops_p->config_frp(osi_core, OSI_DISABLE); @@ -371,29 +398,55 @@ static int frp_hw_write(struct osi_core_priv_data *const osi_core, goto hw_write_enable_frp; } - /* Write FRP entries into HW */ - for (i = 0; i < frp_cnt; i++) { - entry = &osi_core->frp_table[i]; - ret = ops_p->update_frp_entry(osi_core, i, &entry->data); + /* Check space for XCS BYPASS rule */ + if ((frp_cnt + 1U) > OSI_FRP_MAX_ENTRY) { + ret = -1; + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "No space for rules\n", OSI_NONE); + goto error; + } + + /* Check HW table size for non-zero */ + if (frp_cnt != 0U) { + /* Write FRP entries into HW */ + for (i = 0; i < frp_cnt; i++) { + entry = &osi_core->frp_table[i]; + ret = ops_p->update_frp_entry(osi_core, i, + &entry->data); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Fail to update FRP entry\n", + OSI_NONE); + goto hw_write_enable_frp; + } + } + + /* Write BYPASS rule for XDCS */ + bypass_entry.match_en = 0x0U; + bypass_entry.accept_frame = 1; + bypass_entry.reject_frame = 1; + ret = ops_p->update_frp_entry(osi_core, frp_cnt, &bypass_entry); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, - "Fail to update FRP entry\n", + "Fail to update BYPASS entry\n", OSI_NONE); goto hw_write_enable_frp; } - } - /* Update the NVE */ - ret = ops_p->update_frp_nve(osi_core, (frp_cnt - 1U)); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, - "Fail to update FRP NVE\n", - OSI_NONE); - } + /* Update the NVE */ + ret = 
ops_p->update_frp_nve(osi_core, frp_cnt); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Fail to update FRP NVE\n", + OSI_NONE); + } - /* Enable the FRP in HW */ + /* Enable the FRP in HW */ hw_write_enable_frp: - tmp = ops_p->config_frp(osi_core, OSI_ENABLE); + tmp = ops_p->config_frp(osi_core, OSI_ENABLE); + } + +error: return (ret < 0) ? ret : tmp; } @@ -409,17 +462,17 @@ static int frp_hw_write(struct osi_core_priv_data *const osi_core, * @retval 0 on success. * @retval -1 on failure. */ -static int frp_add_proto(struct osi_core_priv_data *const osi_core, - struct osi_core_frp_cmd *const cmd, - unsigned char *pos) +static nve32_t frp_add_proto(struct osi_core_priv_data *const osi_core, + struct osi_core_frp_cmd *const cmd, + nveu8_t *pos) { - int ret = -1, proto_oki = -1; - unsigned char proto_entry = OSI_DISABLE; - unsigned char req = 0U; - unsigned char proto_match[FRP_PROTO_LENGTH]; - unsigned char proto_lendth; - unsigned char proto_offset; - unsigned char match_type = cmd->match_type; + nve32_t ret, proto_oki; + nveu8_t proto_entry = OSI_DISABLE; + nveu8_t req = 0U; + nveu8_t proto_match[FRP_PROTO_LENGTH]; + nveu8_t proto_lendth; + nveu8_t proto_offset; + nveu8_t match_type = cmd->match_type; switch (match_type) { case OSI_FRP_MATCH_L4_S_UPORT: @@ -462,16 +515,18 @@ static int frp_add_proto(struct osi_core_priv_data *const osi_core, /* Check and Add protocol FRP entire */ if (proto_entry == OSI_ENABLE) { /* Check for space */ - req = (unsigned char) (frp_req_entries(cmd->offset, cmd->match_length) + 1U); + req = (nveu8_t)(frp_req_entries(cmd->offset, cmd->match_length) + 1U); if (*pos > (OSI_FRP_MAX_ENTRY - req)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail add FRP protocol entry\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Add protocol FRP entire */ - proto_oki = *pos + 1; + proto_oki = (nve32_t)*pos; + proto_oki += 1; ret = frp_entry_add(osi_core, cmd->frp_id, *pos, proto_match, proto_lendth, proto_offset, 
OSI_FRP_MODE_LINK, @@ -480,14 +535,16 @@ static int frp_add_proto(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail add FRP protocol entry\n", OSI_NONE); - return ret; + goto done; } /* Increment pos value */ - *pos = (unsigned char) (*pos + 1U); + *pos = (nveu8_t)(*pos + (nveu8_t)1); } - return 0; + ret = 0; +done: + return ret; } /** @@ -495,15 +552,13 @@ static int frp_add_proto(struct osi_core_priv_data *const osi_core, * * Algorithm: Parse give FRP command match type and update it's offset. * - * @param[in] osi_core: OSI core private data structure. * @param[in] cmd: OSI FRP command structure. * */ -static void frp_parse_mtype(OSI_UNUSED struct osi_core_priv_data *const osi_core, - struct osi_core_frp_cmd *const cmd) +static void frp_parse_mtype(struct osi_core_frp_cmd *const cmd) { - unsigned char offset; - unsigned char match_type = cmd->match_type; + nveu8_t offset; + nveu8_t match_type = cmd->match_type; switch (match_type) { case OSI_FRP_MATCH_L2_DA: @@ -549,26 +604,28 @@ static void frp_parse_mtype(OSI_UNUSED struct osi_core_priv_data *const osi_core * Algorithm: Parse give FRP delete command and update it on OSI data and HW. * * @param[in] osi_core: OSI core private data structure. + * @param[in] ops_p: Core operations data structure. * @param[in] cmd: OSI FRP command structure. * * @retval 0 on success. * @retval -1 on failure. 
*/ -static int frp_delete(struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p, - struct osi_core_frp_cmd *const cmd) +static nve32_t frp_delete(struct osi_core_priv_data *const osi_core, + struct core_ops *ops_p, + struct osi_core_frp_cmd *const cmd) { - int ret = -1; - unsigned char i = 0U, pos = 0U, count = 0U; - int frp_id = cmd->frp_id; - unsigned int frp_cnt = osi_core->frp_cnt; + nve32_t ret; + nveu8_t i = 0U, pos = 0U, count = 0U; + nve32_t frp_id = cmd->frp_id; + nveu32_t frp_cnt = osi_core->frp_cnt; /* Check for FRP entries */ if (frp_cnt == 0U) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "No FRP entries in the table\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Find the FRP entry */ @@ -576,15 +633,17 @@ static int frp_delete(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "No FRP entry found to delete\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Validate pos and count */ - if (((unsigned int)pos + count) > frp_cnt) { + if (((nveu32_t)pos + count) > frp_cnt) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Invalid FRP entry index\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Update the frp_table entry */ @@ -592,12 +651,15 @@ static int frp_delete(struct osi_core_priv_data *const osi_core, (sizeof(struct osi_core_frp_entry) * count)); /* Move in FRP table entries by count */ - for (i = (unsigned char) (pos + count); i <= frp_cnt; i++) { + for (i = (nveu8_t)(pos + count); i <= frp_cnt; i++) { frp_entry_copy(&osi_core->frp_table[pos], &osi_core->frp_table[i]); pos++; } + /* Update the frp_cnt entry */ + osi_core->frp_cnt = (frp_cnt - count); + /* Write FRP Table into HW */ ret = frp_hw_write(osi_core, ops_p); if (ret < 0) { @@ -606,9 +668,7 @@ static int frp_delete(struct osi_core_priv_data *const osi_core, OSI_NONE); } - /* Update the frp_cnt entry */ - osi_core->frp_cnt = (frp_cnt - count); - +done: return ret; } @@ -618,29 +678,31 @@ static int 
frp_delete(struct osi_core_priv_data *const osi_core, * Algorithm: Parse give FRP update command and update it on OSI data and HW. * * @param[in] osi_core: OSI core private data structure. + * @param[in] ops_p: Core operations data structure. * @param[in] cmd: OSI FRP command structure. * * @retval 0 on success. * @retval -1 on failure. */ -static int frp_update(struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p, - struct osi_core_frp_cmd *const cmd) +static nve32_t frp_update(struct osi_core_priv_data *const osi_core, + struct core_ops *ops_p, + struct osi_core_frp_cmd *const cmd) { - int ret = -1; - unsigned char pos = 0U, count = 0U, req = 0U; - int frp_id = cmd->frp_id; + nve32_t ret; + nveu8_t pos = 0U, count = 0U, req = 0U; + nve32_t frp_id = cmd->frp_id; /* Validate given frp_id */ if (frp_entry_find(osi_core, frp_id, &pos, &count) < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "No FRP entry found\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Parse match type and update command offset */ - frp_parse_mtype(osi_core, cmd); + frp_parse_mtype(cmd); /* Calculate the required FRP entries for Update Command. 
*/ req = frp_req_entries(cmd->offset, cmd->match_length); @@ -662,7 +724,8 @@ static int frp_update(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Old and New required FRP entries mismatch\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Process and update FRP Command Protocal Entry */ @@ -671,7 +734,7 @@ static int frp_update(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to parse match type\n", OSI_NONE); - return ret; + goto done; } /* Update FRP entries */ @@ -683,7 +746,7 @@ static int frp_update(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to update FRP entry\n", OSI_NONE); - return ret; + goto done; } /* Write FRP Table into HW */ @@ -694,6 +757,7 @@ static int frp_update(struct osi_core_priv_data *const osi_core, OSI_NONE); } +done: return ret; } @@ -703,26 +767,28 @@ static int frp_update(struct osi_core_priv_data *const osi_core, * Algorithm: Parse give FRP Add command and update it on OSI data and HW. * * @param[in] osi_core: OSI core private data structure. + * @param[in] ops_p: Core operations data structure. * @param[in] cmd: OSI FRP command structure. * * @retval 0 on success. * @retval -1 on failure. 
*/ -static int frp_add(struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p, - struct osi_core_frp_cmd *const cmd) +static nve32_t frp_add(struct osi_core_priv_data *const osi_core, + struct core_ops *ops_p, + struct osi_core_frp_cmd *const cmd) { - int ret = -1; - unsigned char pos = 0U, count = 0U; - int frp_id = cmd->frp_id; - unsigned int nve = osi_core->frp_cnt; + nve32_t ret; + nveu8_t pos = 0U, count = 0U; + nve32_t frp_id = cmd->frp_id; + nveu32_t nve = osi_core->frp_cnt; /* Check for MAX FRP entries */ if (nve >= OSI_FRP_MAX_ENTRY) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, "FRP etries are full\n", nve); - return -1; + ret = -1; + goto done; } /* Check the FRP entry already exists */ @@ -731,23 +797,24 @@ static int frp_add(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "FRP entry already exists\n", OSI_NONE); - return -1; + ret = -1; + goto done; } /* Parse match type and update command offset */ - frp_parse_mtype(osi_core, cmd); + frp_parse_mtype(cmd); /* Process and add FRP Command Protocal Entry */ - ret = frp_add_proto(osi_core, cmd, (unsigned char *)&nve); + ret = frp_add_proto(osi_core, cmd, (nveu8_t *)&nve); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to parse match type\n", OSI_NONE); - return ret; + goto done; } /* Add Match data FRP Entry */ - ret = frp_entry_add(osi_core, frp_id, (unsigned char)nve, + ret = frp_entry_add(osi_core, frp_id, (nveu8_t)nve, cmd->match, cmd->match_length, cmd->offset, cmd->filter_mode, cmd->next_frp_id, cmd->dma_sel); @@ -755,7 +822,7 @@ static int frp_add(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to add FRP entry\n", nve); - return ret; + goto done; } osi_core->frp_cnt = nve + frp_req_entries(cmd->offset, cmd->match_length); @@ -768,6 +835,7 @@ static int frp_add(struct osi_core_priv_data *const osi_core, OSI_NONE); } +done: return ret; } @@ -777,16 +845,17 @@ 
static int frp_add(struct osi_core_priv_data *const osi_core, * Algorithm: Parse give FRP command and update it on OSI data and HW. * * @param[in] osi_core: OSI core private data structure. + * @param[in] ops_p: Core operations data structure. * @param[in] cmd: OSI FRP command structure. * * @retval 0 on success. * @retval -1 on failure. */ -int setup_frp(struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p, - struct osi_core_frp_cmd *const cmd) +nve32_t setup_frp(struct osi_core_priv_data *const osi_core, + struct core_ops *ops_p, + struct osi_core_frp_cmd *const cmd) { - int ret = -1; + nve32_t ret = -1; switch (cmd->cmd) { case OSI_FRP_CMD_ADD: @@ -817,20 +886,3 @@ int setup_frp(struct osi_core_priv_data *const osi_core, return ret; } - -/** - * @brief init_frp - Initialize FRP. - * - * Algorithm: Reset all the data in the FRP table Initialize FRP count to zero. - * - * @param[in] osi_core: OSI core private data structure. - * - */ -void init_frp(struct osi_core_priv_data *const osi_core) -{ - /* Reset the NVE count to zero */ - osi_core->frp_cnt = 0U; - /* Clear all instruction of FRP */ - osi_memset(osi_core->frp_table, 0U, - (sizeof(struct osi_core_frp_entry) * OSI_FRP_MAX_ENTRY)); -} diff --git a/kernel/nvethernetrm/osi/core/frp.h b/kernel/nvethernetrm/osi/core/frp.h index d1092b74ff..0e902c183e 100644 --- a/kernel/nvethernetrm/osi/core/frp.h +++ b/kernel/nvethernetrm/osi/core/frp.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -64,21 +64,20 @@ * @retval 0 on success. * @retval -1 on failure. 
*/ -int setup_frp(struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p, - struct osi_core_frp_cmd *const cmd); +nve32_t setup_frp(struct osi_core_priv_data *const osi_core, + struct core_ops *ops_p, + struct osi_core_frp_cmd *const cmd); /** - * @brief init_frp - Init the FRP Instruction Table. + * @brief frp_hw_write - Update HW FRP table. * - * @param[in] osi_core: OSI core private data structure. + * Algorithm: Update FRP table into HW. * - * @note - * 1) MAC and PHY should be init and started. see osi_start_mac() + * @param[in] osi_core: OSI core private data structure. * - * @retval 0 on success + * @retval 0 on success. * @retval -1 on failure. */ -void init_frp(struct osi_core_priv_data *const osi_core); - +nve32_t frp_hw_write(struct osi_core_priv_data *const osi_core, + struct core_ops *const ops_p); #endif /* FRP_H */ diff --git a/kernel/nvethernetrm/osi/core/ivc_core.c b/kernel/nvethernetrm/osi/core/ivc_core.c index fe40e26739..555b0231d7 100644 --- a/kernel/nvethernetrm/osi/core/ivc_core.c +++ b/kernel/nvethernetrm/osi/core/ivc_core.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -30,11 +30,6 @@ #include "../osi/common/common.h" #include "macsec.h" -/** - * @brief ivc_safety_config - EQOS MAC core safety configuration - */ -static struct core_func_safety ivc_safety_config; - /** * @brief ivc_handle_ioctl - marshell input argument to handle runtime command * @@ -55,27 +50,40 @@ static nve32_t ivc_handle_ioctl(struct osi_core_priv_data *osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = handle_ioctl; - msg.status = osi_memcpy((void *)&msg.data.ioctl_data, - (void *)data, - sizeof(struct osi_ioctl)); + /* osi_memcpy is treated as void since it is + * an internal functin which will be always success + */ + (void)osi_memcpy((void *)&msg.data.ioctl_data, (void *)data, + sizeof(struct osi_ioctl)); if (data->cmd == OSI_CMD_CONFIG_PTP) { - osi_memcpy((void *)&msg.data.ioctl_data.ptp_config, - (void *)&osi_core->ptp_config, - sizeof(struct osi_ptp_config)); + (void)osi_memcpy((void *)&msg.data.ioctl_data.ptp_config, + (void *)&osi_core->ptp_config, + sizeof(struct osi_ptp_config)); } ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); - if (data->cmd == OSI_CMD_READ_MMC) { - msg.status = osi_memcpy((void *)&osi_core->mmc, - (void *)&msg.data.mmc, - sizeof(struct osi_mmc_counters)); - } else { - msg.status = osi_memcpy((void *)data, - (void *)&msg.data.ioctl_data, - sizeof(struct osi_ioctl)); + switch (data->cmd) { + case OSI_CMD_READ_MMC: + (void)osi_memcpy((void *)&osi_core->mmc, + (void *)&msg.data.mmc_s, + sizeof(struct osi_mmc_counters)); + break; + + case OSI_CMD_READ_STATS: + (void)osi_memcpy((void *)&osi_core->stats, + (void *)&msg.data.stats_s, + sizeof(struct osi_stats)); + break; + + default: + (void)osi_memcpy((void *)data, + (void *)&msg.data.ioctl_data, + sizeof(struct osi_ioctl)); + break; } + return ret; } @@ -83,15 +91,11 @@ static nve32_t ivc_handle_ioctl(struct 
osi_core_priv_data *osi_core, * @brief ivc_core_init - EQOS MAC, MTL and common DMA Initialization * * @param[in] osi_core: OSI core private data structure. - * @param[in] tx_fifo_size: MTL TX FIFO size - * @param[in] rx_fifo_size: MTL RX FIFO size * * @retval 0 on success * @retval -1 on failure. */ -static nve32_t ivc_core_init(struct osi_core_priv_data *const osi_core, - OSI_UNUSED nveu32_t tx_fifo_size, - OSI_UNUSED nveu32_t rx_fifo_size) +static nve32_t ivc_core_init(struct osi_core_priv_data *const osi_core) { ivc_msg_common_t msg; @@ -117,8 +121,7 @@ static void ivc_core_deinit(struct osi_core_priv_data *const osi_core) osi_memset(&msg, 0, sizeof(msg)); - msg.cmd = handle_ioctl; - msg.data.ioctl_data.cmd = OSI_CMD_STOP_MAC; + msg.cmd = core_deinit; ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); if (ret < 0) { @@ -151,10 +154,10 @@ static nve32_t ivc_write_phy_reg(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = write_phy_reg; - msg.data.args.arguments[index++] = phyaddr; - msg.data.args.arguments[index++] = phyreg; - msg.data.args.arguments[index++] = phydata; - msg.data.args.count = index; + msg.args.arguments[index++] = phyaddr; + msg.args.arguments[index++] = phyreg; + msg.args.arguments[index++] = phydata; + msg.args.count = index; return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); } @@ -182,14 +185,15 @@ static nve32_t ivc_read_phy_reg(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = read_phy_reg; - msg.data.args.arguments[index++] = phyaddr; - msg.data.args.arguments[index++] = phyreg; - msg.data.args.count = index; + msg.args.arguments[index++] = phyaddr; + msg.args.arguments[index++] = phyreg; + msg.args.count = index; return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); } #ifdef MACSEC_SUPPORT +#ifdef DEBUG_MACSEC /** * @brief ivc_macsec_dbg_events_config - Configure Debug events * @@ -199,7 +203,7 @@ static nve32_t 
ivc_read_phy_reg(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. */ -static int ivc_macsec_dbg_events_config( +static nve32_t ivc_macsec_dbg_events_config( struct osi_core_priv_data *const osi_core, struct osi_macsec_dbg_buf_config *const dbg_buf_config) { @@ -210,19 +214,19 @@ static int ivc_macsec_dbg_events_config( msg.cmd = dbg_events_config_macsec; - msg.status = osi_memcpy((void *)&msg.data.dbg_buf_config, - (void *)dbg_buf_config, - sizeof(struct osi_macsec_dbg_buf_config)); + (void)osi_memcpy((void *)&msg.data.dbg_buf_config, + (void *)dbg_buf_config, + sizeof(struct osi_macsec_dbg_buf_config)); ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); if (ret != 0) { - return ret; + goto done; } - msg.status = osi_memcpy((void *)dbg_buf_config, - (void *)&msg.data.dbg_buf_config, - sizeof(struct osi_macsec_dbg_buf_config)); - + (void)osi_memcpy((void *)dbg_buf_config, + (void *)&msg.data.dbg_buf_config, + sizeof(struct osi_macsec_dbg_buf_config)); +done: return ret; } @@ -235,7 +239,7 @@ static int ivc_macsec_dbg_events_config( * @retval 0 on Success * @retval -1 on Failure */ -static int ivc_macsec_dbg_buf_config( +static nve32_t ivc_macsec_dbg_buf_config( struct osi_core_priv_data *const osi_core, struct osi_macsec_dbg_buf_config *const dbg_buf_config) { @@ -246,21 +250,22 @@ static int ivc_macsec_dbg_buf_config( msg.cmd = dbg_buf_config_macsec; - msg.status = osi_memcpy((void *)&msg.data.dbg_buf_config, - (void *)dbg_buf_config, - sizeof(struct osi_macsec_dbg_buf_config)); + (void)osi_memcpy((void *)&msg.data.dbg_buf_config, + (void *)dbg_buf_config, + sizeof(struct osi_macsec_dbg_buf_config)); ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); if (ret != 0) { - return ret; + goto done; } - msg.status = osi_memcpy((void *)dbg_buf_config, - (void *) &msg.data.dbg_buf_config, - sizeof(struct osi_macsec_dbg_buf_config)); - + (void)osi_memcpy((void *)dbg_buf_config, + (void *) 
&msg.data.dbg_buf_config, + sizeof(struct osi_macsec_dbg_buf_config)); +done: return ret; } +#endif /* DEBUG_MACSEC */ /** * @brief macsec_read_mmc - To read statitics registers and update structure @@ -284,27 +289,26 @@ static void ivc_macsec_read_mmc(struct osi_core_priv_data *const osi_core) msg.status = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); - msg.status = osi_memcpy((void *)&osi_core->macsec_mmc, - (void *) &msg.data.macsec_mmc, - sizeof(struct osi_macsec_mmc_counters)); - msg.status = osi_memcpy((void *)&osi_core->macsec_irq_stats, - (void *) &msg.data.macsec_irq_stats, - sizeof(struct osi_macsec_irq_stats)); + (void)osi_memcpy((void *)&osi_core->macsec_mmc, + (void *) &msg.data.macsec_mmc, + sizeof(struct osi_macsec_mmc_counters)); + (void)osi_memcpy((void *)&osi_core->macsec_irq_stats, + (void *) &msg.data.macsec_irq_stats, + sizeof(struct osi_macsec_irq_stats)); } /** * @brief ivc_get_sc_lut_key_index - Macsec get Key_index * * @param[in] osi_core: OSI Core private data structure. - * @param[in] sc: Secure Channel info. - * @param[in] enable: enable or disable. + * @param[in] sci: Secure Channel info. + * @param[out] key_index: Key table index to program SAK. * @param[in] ctlr: Controller instance. - * @param[[out] kt_idx: Key table index to program SAK. 
* * @retval 0 on Success * @retval -1 on Failure */ -static int ivc_get_sc_lut_key_index(struct osi_core_priv_data *const osi_core, +static nve32_t ivc_get_sc_lut_key_index(struct osi_core_priv_data *const osi_core, nveu8_t *sci, nveu32_t *key_index, nveu16_t ctlr) { @@ -314,17 +318,16 @@ static int ivc_get_sc_lut_key_index(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = macsec_get_sc_lut_key_index; - msg.status = osi_memcpy((void *) &msg.data.macsec_cfg.sci, - (void *)sci, - OSI_SCI_LEN); + (void)osi_memcpy((void *) &msg.data.macsec_cfg.sci, + (void *)sci, + OSI_SCI_LEN); msg.data.macsec_cfg.ctlr = ctlr; ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); - if (ret != 0) { - return ret; + if (ret == 0) { + *key_index = msg.data.macsec_cfg.key_index; } - *key_index = msg.data.macsec_cfg.key_index; return ret; } @@ -335,15 +338,15 @@ static int ivc_get_sc_lut_key_index(struct osi_core_priv_data *const osi_core, * @param[in] sc: Secure Channel info. * @param[in] enable: enable or disable. * @param[in] ctlr: Controller instance. - * @param[[out] kt_idx: Key table index to program SAK. + * @param[out] kt_idx: Key table index to program SAK. 
* * @retval 0 on Success * @retval -1 on Failure */ -static int ivc_macsec_config(struct osi_core_priv_data *const osi_core, +static nve32_t ivc_macsec_config(struct osi_core_priv_data *const osi_core, struct osi_macsec_sc_info *const sc, - unsigned int enable, unsigned short ctlr, - unsigned short *kt_idx) + nveu32_t enable, nveu16_t ctlr, + nveu16_t *kt_idx) { ivc_msg_common_t msg; nve32_t ret = 0; @@ -351,47 +354,23 @@ static int ivc_macsec_config(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = config_macsec; - msg.status = osi_memcpy((void *) &msg.data.macsec_cfg.sc_info, - (void *)sc, - sizeof(struct osi_macsec_sc_info)); + (void)osi_memcpy((void *) &msg.data.macsec_cfg.sc_info, + (void *)sc, + sizeof(struct osi_macsec_sc_info)); msg.data.macsec_cfg.enable = enable; msg.data.macsec_cfg.ctlr = ctlr; msg.data.macsec_cfg.kt_idx = *kt_idx; ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); if (ret != 0) { - return ret; + goto done; } *kt_idx = msg.data.macsec_cfg.kt_idx; +done: return ret; } -/** - * @brief ivc_macsec_update_mtu - Update MACSEC mtu. - * - * @param[in] osi_core: OSI Core private data structure. - * @param[in] mtu: MACSEC MTU len. - * - * @retval 0 on Success - * @retval -1 on Failure - */ -static nve32_t ivc_macsec_update_mtu(struct osi_core_priv_data *const osi_core, - nveu32_t mtu) -{ - ivc_msg_common_t msg; - nveu32_t index = 0; - - osi_memset(&msg, 0, sizeof(msg)); - - msg.cmd = macsec_update_mtu_size; - msg.data.args.arguments[index] = mtu; - index++; - msg.data.args.count = index; - - return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); -} - /** * @brief ivc_macsec_enable - Enable or disable Macsec. 
* @@ -401,8 +380,8 @@ static nve32_t ivc_macsec_update_mtu(struct osi_core_priv_data *const osi_core, * @retval 0 on Success * @retval -1 on Failure */ -static int ivc_macsec_enable(struct osi_core_priv_data *const osi_core, - unsigned int enable) +static nve32_t ivc_macsec_enable(struct osi_core_priv_data *const osi_core, + nveu32_t enable) { ivc_msg_common_t msg; nveu32_t index = 0; @@ -410,13 +389,14 @@ static int ivc_macsec_enable(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = en_macsec; - msg.data.args.arguments[index] = enable; + msg.args.arguments[index] = enable; index++; - msg.data.args.count = index; + msg.args.count = index; return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); } +#ifdef DEBUG_MACSEC /** * @brief ivc_macsec_loopback_config - Loopback configure. * @@ -426,8 +406,8 @@ static int ivc_macsec_enable(struct osi_core_priv_data *const osi_core, * @retval 0 on Success * @retval -1 on Failure */ -static int ivc_macsec_loopback_config(struct osi_core_priv_data *const osi_core, - unsigned int enable) +static nve32_t ivc_macsec_loopback_config(struct osi_core_priv_data *const osi_core, + nveu32_t enable) { ivc_msg_common_t msg; nveu32_t index = 0; @@ -435,12 +415,13 @@ static int ivc_macsec_loopback_config(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = loopback_config_macsec; - msg.data.args.arguments[index] = enable; + msg.args.arguments[index] = enable; index++; - msg.data.args.count = index; + msg.args.count = index; return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); } +#endif /* DEBUG_MACSEC */ #ifdef MACSEC_KEY_PROGRAM /** @@ -461,18 +442,18 @@ static nve32_t ivc_macsec_kt_config(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = kt_config_macsec; - msg.status = osi_memcpy((void *) &msg.data.kt_config, - (void *)kt_config, - sizeof(struct osi_macsec_kt_config)); + (void)osi_memcpy((void *) 
&msg.data.kt_config, + (void *)kt_config, + sizeof(struct osi_macsec_kt_config)); ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); if (ret != 0) { return ret; } - msg.status = osi_memcpy((void *)kt_config, - (void *)&msg.data.kt_config, - sizeof(struct osi_macsec_kt_config)); + (void)osi_memcpy((void *)kt_config, + (void *)&msg.data.kt_config, + sizeof(struct osi_macsec_kt_config)); return ret; } #endif /* MACSEC_KEY_PROGRAM */ @@ -486,8 +467,8 @@ static nve32_t ivc_macsec_kt_config(struct osi_core_priv_data *const osi_core, * @retval 0 on Success * @retval -1 on Failure */ -static int ivc_macsec_cipher_config(struct osi_core_priv_data *const osi_core, - unsigned int cipher) +static nve32_t ivc_macsec_cipher_config(struct osi_core_priv_data *const osi_core, + nveu32_t cipher) { ivc_msg_common_t msg; nveu32_t index = 0; @@ -495,9 +476,9 @@ static int ivc_macsec_cipher_config(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = cipher_config; - msg.data.args.arguments[index] = cipher; + msg.args.arguments[index] = cipher; index++; - msg.data.args.count = index; + msg.args.count = index; return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); } @@ -519,48 +500,35 @@ static nve32_t ivc_macsec_lut_config(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = lut_config_macsec; - msg.status = osi_memcpy((void *) &msg.data.lut_config, - (void *)lut_config, - sizeof(struct osi_macsec_lut_config)); + (void)osi_memcpy((void *) &msg.data.lut_config, + (void *)lut_config, + sizeof(struct osi_macsec_lut_config)); ret = osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); if (ret != 0) { - return ret; + goto done; } - msg.status = osi_memcpy((void *)lut_config, - (void *)&msg.data.lut_config, - sizeof(struct osi_macsec_lut_config)); + (void)osi_memcpy((void *)lut_config, + (void *)&msg.data.lut_config, + sizeof(struct osi_macsec_lut_config)); +done: return ret; } /** - * @brief 
ivc_macsec_handle_s_irq - handle s irq. + * @brief ivc_macsec_handle_irq - handle macsec irq. * * @param[in] osi_core: OSI Core private data structure. * */ -static void ivc_macsec_handle_s_irq(OSI_UNUSED +static void ivc_macsec_handle_irq(OSI_UNUSED struct osi_core_priv_data *const osi_core) { OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, "Nothing to handle \n", 0ULL); } -/** - * @brief ivc_macsec_handle_ns_irq - handle ns irq. - * - * @param[in] osi_core: OSI Core private data structure. - * - */ - -static void ivc_macsec_handle_ns_irq(OSI_UNUSED - struct osi_core_priv_data *const osi_core) -{ - OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "Nothing to handle \n", 0ULL); -} - /** * @brief ivc_macsec_deinit - De Initialize. * @@ -570,7 +538,7 @@ static void ivc_macsec_handle_ns_irq(OSI_UNUSED * @retval -1 on Failure */ -static int ivc_macsec_deinit(struct osi_core_priv_data *const osi_core) +static nve32_t ivc_macsec_deinit(struct osi_core_priv_data *const osi_core) { ivc_msg_common_t msg; @@ -585,12 +553,12 @@ static int ivc_macsec_deinit(struct osi_core_priv_data *const osi_core) * @brief ivc_macsec_init -Initialize. * * @param[in] osi_core: OSI Core private data structure. - * @param[in] genl_info: Generic netlink information structure. + * @param[in] mtu: mtu to be set. 
* * @retval 0 on Success * @retval -1 on Failure */ -static int ivc_macsec_init(struct osi_core_priv_data *const osi_core, +static nve32_t ivc_macsec_init(struct osi_core_priv_data *const osi_core, nveu32_t mtu) { ivc_msg_common_t msg; @@ -599,9 +567,9 @@ static int ivc_macsec_init(struct osi_core_priv_data *const osi_core, osi_memset(&msg, 0, sizeof(msg)); msg.cmd = init_macsec; - msg.data.args.arguments[index] = mtu; + msg.args.arguments[index] = mtu; index++; - msg.data.args.count = index; + msg.args.count = index; return osi_core->osd_ops.ivc_send(osi_core, &msg, sizeof(msg)); } @@ -621,32 +589,24 @@ void ivc_init_macsec_ops(void *macsecops) ops->init = ivc_macsec_init; ops->deinit = ivc_macsec_deinit; - ops->handle_ns_irq = ivc_macsec_handle_ns_irq; - ops->handle_s_irq = ivc_macsec_handle_s_irq; + ops->handle_irq = ivc_macsec_handle_irq; ops->lut_config = ivc_macsec_lut_config; #ifdef MACSEC_KEY_PROGRAM ops->kt_config = ivc_macsec_kt_config; #endif /* MACSEC_KEY_PROGRAM */ ops->cipher_config = ivc_macsec_cipher_config; - ops->loopback_config = ivc_macsec_loopback_config; ops->macsec_en = ivc_macsec_enable; ops->config = ivc_macsec_config; ops->read_mmc = ivc_macsec_read_mmc; - ops->dbg_buf_config = ivc_macsec_dbg_buf_config; +#ifdef DEBUG_MACSEC + ops->loopback_config = ivc_macsec_loopback_config; ops->dbg_events_config = ivc_macsec_dbg_events_config; + ops->dbg_buf_config = ivc_macsec_dbg_buf_config; +#endif /* DEBUG_MACSEC */ ops->get_sc_lut_key_index = ivc_get_sc_lut_key_index; - ops->update_mtu = ivc_macsec_update_mtu; } #endif -/** - * @brief ivc_get_core_safety_config - EQOS MAC safety configuration - */ -void *ivc_get_core_safety_config(void) -{ - return &ivc_safety_config; -} - /** * @brief vir_ivc_core_deinit - MAC core deinitialization * diff --git a/kernel/nvethernetrm/osi/core/macsec.c b/kernel/nvethernetrm/osi/core/macsec.c index a1c545f65e..e2f039f0aa 100644 --- a/kernel/nvethernetrm/osi/core/macsec.c +++ b/kernel/nvethernetrm/osi/core/macsec.c 
@@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -26,22 +26,23 @@ #include "../osi/common/common.h" #include "core_local.h" -#if defined(DEBUG_MACSEC) && defined(QNX_OS) -#define LOG(...) \ +#if 0 /* Qnx */ +#define MACSEC_LOG(...) \ { \ - slogf(0, 2, ##__VA_ARGS__); \ + slogf(0, 6, ##__VA_ARGS__); \ } -#elif defined(DEBUG_MACSEC) && defined(LINUX_OS) +#elif 0 /* Linux */ #include -#define LOG(...) \ +#define MACSEC_LOG(...) \ { \ - pr_err(##__VA_ARGS__); \ + pr_debug(__VA_ARGS__); \ } #else -#define LOG(...) +#define MACSEC_LOG(...) #endif +#ifdef DEBUG_MACSEC /** * @brief poll_for_dbg_buf_update - Query the status of a debug buffer update. * @@ -70,6 +71,7 @@ static nve32_t poll_for_dbg_buf_update(struct osi_core_priv_data *const osi_core nveu32_t retry = RETRY_COUNT; nveu32_t dbg_buf_config; nve32_t cond = COND_NOT_MET; + nve32_t ret = 0; nveu32_t count; count = 0; @@ -77,7 +79,8 @@ static nve32_t poll_for_dbg_buf_update(struct osi_core_priv_data *const osi_core if (count > retry) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "timeout!\n", 0ULL); - return -1; + ret = -1; + goto err; } dbg_buf_config = osi_readla(osi_core, @@ -91,8 +94,8 @@ static nve32_t poll_for_dbg_buf_update(struct osi_core_priv_data *const osi_core /* wait on UPDATE bit to reset */ osi_core->osd_ops.udelay(RETRY_DELAY); } - - return 0; +err: + return ret; } @@ -194,7 +197,8 @@ static void write_tx_dbg_trigger_evts( nveu8_t *base = (nveu8_t *)osi_core->macsec_base; nveu32_t flags = 0; - nveu32_t tx_trigger_evts, debug_ctrl_reg; + nveu32_t tx_trigger_evts; + nveu32_t debug_ctrl_reg; flags = dbg_buf_config->flags; tx_trigger_evts = osi_readla(osi_core, @@ -235,7 +239,7 @@ static void write_tx_dbg_trigger_evts( tx_trigger_evts &= 
~MACSEC_TX_DBG_CAPTURE; } - LOG("%s: 0x%x", __func__, tx_trigger_evts); + MACSEC_LOG("%s: 0x%x", __func__, tx_trigger_evts); osi_writela(osi_core, tx_trigger_evts, base + MACSEC_TX_DEBUG_TRIGGER_EN_0); if (tx_trigger_evts != OSI_NONE) { @@ -243,7 +247,7 @@ static void write_tx_dbg_trigger_evts( debug_ctrl_reg = osi_readla(osi_core, base + MACSEC_TX_DEBUG_CONTROL_0); debug_ctrl_reg |= MACSEC_TX_DEBUG_CONTROL_0_START_CAP; - LOG("%s: debug_ctrl_reg 0x%x", __func__, + MACSEC_LOG("%s: debug_ctrl_reg 0x%x", __func__, debug_ctrl_reg); osi_writela(osi_core, debug_ctrl_reg, base + MACSEC_TX_DEBUG_CONTROL_0); @@ -280,12 +284,12 @@ static void tx_dbg_trigger_evts( nveu32_t flags = 0; nveu32_t tx_trigger_evts; - if (dbg_buf_config->rw == OSI_DBG_TBL_WRITE) { + if (dbg_buf_config->rw == OSI_LUT_WRITE) { write_tx_dbg_trigger_evts(osi_core, dbg_buf_config); } else { tx_trigger_evts = osi_readla(osi_core, base + MACSEC_TX_DEBUG_TRIGGER_EN_0); - LOG("%s: 0x%x", __func__, tx_trigger_evts); + MACSEC_LOG("%s: 0x%x", __func__, tx_trigger_evts); if ((tx_trigger_evts & MACSEC_TX_DBG_LKUP_MISS) != OSI_NONE) { flags |= OSI_TX_DBG_LKUP_MISS_EVT; } @@ -336,7 +340,8 @@ static void write_rx_dbg_trigger_evts( nveu8_t *base = (nveu8_t *)osi_core->macsec_base; nveu32_t flags = 0; - nveu32_t rx_trigger_evts = 0, debug_ctrl_reg; + nveu32_t rx_trigger_evts = 0; + nveu32_t debug_ctrl_reg; flags = dbg_buf_config->flags; rx_trigger_evts = osi_readla(osi_core, @@ -376,7 +381,7 @@ static void write_rx_dbg_trigger_evts( } else { rx_trigger_evts &= ~MACSEC_RX_DBG_CAPTURE; } - LOG("%s: 0x%x", __func__, rx_trigger_evts); + MACSEC_LOG("%s: 0x%x", __func__, rx_trigger_evts); osi_writela(osi_core, rx_trigger_evts, base + MACSEC_RX_DEBUG_TRIGGER_EN_0); if (rx_trigger_evts != OSI_NONE) { @@ -384,7 +389,7 @@ static void write_rx_dbg_trigger_evts( debug_ctrl_reg = osi_readla(osi_core, base + MACSEC_RX_DEBUG_CONTROL_0); debug_ctrl_reg |= MACSEC_RX_DEBUG_CONTROL_0_START_CAP; - LOG("%s: debug_ctrl_reg 0x%x", __func__, 
+ MACSEC_LOG("%s: debug_ctrl_reg 0x%x", __func__, debug_ctrl_reg); osi_writela(osi_core, debug_ctrl_reg, base + MACSEC_RX_DEBUG_CONTROL_0); @@ -421,12 +426,12 @@ static void rx_dbg_trigger_evts( nveu32_t flags = 0; nveu32_t rx_trigger_evts = 0; - if (dbg_buf_config->rw == OSI_DBG_TBL_WRITE) { + if (dbg_buf_config->rw == OSI_LUT_WRITE) { write_rx_dbg_trigger_evts(osi_core, dbg_buf_config); } else { rx_trigger_evts = osi_readla(osi_core, base + MACSEC_RX_DEBUG_TRIGGER_EN_0); - LOG("%s: 0x%x", __func__, rx_trigger_evts); + MACSEC_LOG("%s: 0x%x", __func__, rx_trigger_evts); if ((rx_trigger_evts & MACSEC_RX_DBG_LKUP_MISS) != OSI_NONE) { flags |= OSI_RX_DBG_LKUP_MISS_EVT; } @@ -477,12 +482,16 @@ static nve32_t validate_inputs_macsec_dbg_buf_conf( const struct osi_core_priv_data *const osi_core, const struct osi_macsec_dbg_buf_config *const dbg_buf_config) { + nve32_t ret = 0; + + (void) osi_core; /* Validate inputs */ if ((dbg_buf_config->rw > OSI_RW_MAX) || (dbg_buf_config->ctlr_sel > OSI_CTLR_SEL_MAX)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Params validation failed\n", 0ULL); - return -1; + ret = -1; + goto err; } if (((dbg_buf_config->ctlr_sel == OSI_CTLR_SEL_TX) && @@ -491,9 +500,11 @@ static nve32_t validate_inputs_macsec_dbg_buf_conf( (dbg_buf_config->index > OSI_RX_DBG_BUF_IDX_MAX))) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Wrong index \n", dbg_buf_config->index); - return -1; + ret = -1; + goto err; } - return 0; +err: + return ret; } /** @@ -532,7 +543,8 @@ static nve32_t macsec_dbg_buf_config(struct osi_core_priv_data *const osi_core, nve32_t ret = 0; if (validate_inputs_macsec_dbg_buf_conf(osi_core, dbg_buf_config) < 0) { - return -1; + ret = -1; + goto err; } dbg_config_reg = osi_readla(osi_core, base + MACSEC_DEBUG_BUF_CONFIG_0); @@ -557,13 +569,14 @@ static nve32_t macsec_dbg_buf_config(struct osi_core_priv_data *const osi_core, osi_writela(osi_core, dbg_config_reg, base + MACSEC_DEBUG_BUF_CONFIG_0); ret = 
poll_for_dbg_buf_update(osi_core); if (ret < 0) { - return ret; + goto err; } - if (dbg_buf_config->rw == OSI_NONE) { + if (dbg_buf_config->rw == OSI_LUT_READ) { read_dbg_buf_data(osi_core, dbg_buf_config->dbg_buf); } - return 0; +err: + return ret; } /** @@ -597,17 +610,19 @@ static nve32_t macsec_dbg_events_config( { nveu64_t events = 0; nveu32_t i, flags = dbg_buf_config->flags; + nve32_t ret = 0; /* Validate inputs */ if ((dbg_buf_config->rw > OSI_RW_MAX) || (dbg_buf_config->ctlr_sel > OSI_CTLR_SEL_MAX)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Params validation failed!\n", 0ULL); - return -1; + ret = -1; + goto err; } /* Only one event allowed to configure at a time */ - if ((flags != OSI_NONE) && (dbg_buf_config->rw == OSI_DBG_TBL_WRITE)) { + if ((flags != OSI_NONE) && (dbg_buf_config->rw == OSI_LUT_WRITE)) { for (i = 0; i < 32U; i++) { if ((flags & ((nveu32_t)(1U) << i)) != OSI_NONE) { CERT_C__POST_INC__U64(events); @@ -616,7 +631,8 @@ static nve32_t macsec_dbg_events_config( if (events > 1U) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Don't allow more than one debug events set\n", flags); - return -1; + ret = -1; + goto err; } } @@ -632,9 +648,10 @@ static nve32_t macsec_dbg_events_config( "Unknown controller select\n", 0ULL); break; } - - return 0; +err: + return ret; } +#endif /* DEBUG_MACSEC */ /** * @brief update_macsec_mmc_val - Reads specific macsec mmc counters @@ -662,7 +679,8 @@ static inline nveul64_t update_macsec_mmc_val( struct osi_core_priv_data *osi_core, nveu64_t offset) { - nveul64_t value_lo, value_hi; + nveul64_t value_lo; + nveul64_t value_hi; value_lo = osi_readla(osi_core, (nveu8_t *)osi_core->macsec_base + offset); @@ -786,26 +804,21 @@ static nve32_t macsec_enable(struct osi_core_priv_data *const osi_core, } val = osi_readla(osi_core, base + MACSEC_CONTROL0); - OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "Read MACSEC_CONTROL0: \n", val); + MACSEC_LOG("Read MACSEC_CONTROL0: 0x%x \n", val); if ((enable & 
OSI_MACSEC_TX_EN) == OSI_MACSEC_TX_EN) { - OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "Enabling macsec TX \n", 0ULL); + MACSEC_LOG("Enabling macsec TX\n"); val |= (MACSEC_TX_EN); } else { - OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "Disabling macsec TX \n", 0ULL); + MACSEC_LOG("Disabling macsec TX\n"); val &= ~(MACSEC_TX_EN); } if ((enable & OSI_MACSEC_RX_EN) == OSI_MACSEC_RX_EN) { - OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "Enabling macsec RX \n", 0ULL); + MACSEC_LOG("Enabling macsec RX\n"); val |= (MACSEC_RX_EN); } else { - OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "Disabling macsec RX \n", 0ULL); + MACSEC_LOG("Disabling macsec RX\n"); val &= ~(MACSEC_RX_EN); } @@ -816,7 +829,7 @@ static nve32_t macsec_enable(struct osi_core_priv_data *const osi_core, osi_core->is_macsec_enabled = OSI_DISABLE; } - LOG("Write MACSEC_CONTROL0: 0x%x\n", val); + MACSEC_LOG("Write MACSEC_CONTROL0: 0x%x\n", val); osi_writela(osi_core, val, base + MACSEC_CONTROL0); exit: @@ -943,13 +956,17 @@ static nve32_t kt_key_write(struct osi_core_priv_data *const osi_core, static nve32_t validate_kt_config(const struct osi_macsec_kt_config *const kt_config) { + nve32_t ret = 0; + /* Validate KT config */ if ((kt_config->table_config.ctlr_sel > OSI_CTLR_SEL_MAX) || (kt_config->table_config.rw > OSI_RW_MAX) || (kt_config->table_config.index > OSI_TABLE_INDEX_MAX)) { - return -1; + ret = -1; + goto err; } - return 0; +err: + return ret; } @@ -962,7 +979,7 @@ static nve32_t macsec_kt_config(struct osi_core_priv_data *const osi_core, ret = validate_kt_config(kt_config); if (ret < 0) { - return ret; + goto err; } kt_config_reg = osi_readla(osi_core, base + MACSEC_GCM_KEYTABLE_CONFIG); @@ -977,7 +994,7 @@ static nve32_t macsec_kt_config(struct osi_core_priv_data *const osi_core, /* For write operation, load the lut_data registers */ ret = kt_key_write(osi_core, kt_config); if (ret < 0) { - return ret; + goto err; } } else { kt_config_reg &= ~MACSEC_KT_CONFIG_RW; @@ 
-992,15 +1009,16 @@ static nve32_t macsec_kt_config(struct osi_core_priv_data *const osi_core, /* Wait for this KT update to finish */ ret = poll_for_kt_update(osi_core); if (ret < 0) { - return ret; + goto err; } if (kt_config->table_config.rw == OSI_NONE) { ret = kt_key_read(osi_core, kt_config); if (ret < 0) { - return ret; + goto err; } } +err: return ret; } #endif /* MACSEC_KEY_PROGRAM */ @@ -1036,6 +1054,7 @@ static inline nve32_t poll_for_lut_update(struct osi_core_priv_data *osi_core) nveu32_t lut_config; nveu32_t count; nve32_t cond = 1; + nve32_t ret = 0; count = 0; while (cond == 1) { @@ -1044,7 +1063,8 @@ static inline nve32_t poll_for_lut_update(struct osi_core_priv_data *osi_core) OSI_LOG_ARG_HW_FAIL, "LUT update timed out\n", 0ULL); - return -1; + ret = -1; + goto exit; } count++; @@ -1060,8 +1080,8 @@ static inline nve32_t poll_for_lut_update(struct osi_core_priv_data *osi_core) osi_core->osd_ops.udelay(RETRY_DELAY); } } - - return 0; +exit: + return ret; } /** @@ -1295,7 +1315,7 @@ static void lut_read_inputs_vlan(const nveu32_t *const lut_data, * @retval -1 for failure */ static nve32_t lut_read_inputs(struct osi_macsec_lut_config *const lut_config, - nveu32_t *const lut_data) + const nveu32_t *const lut_data) { struct osi_lut_inputs entry = {0}; nveu32_t flags = 0; @@ -1387,7 +1407,8 @@ static nve32_t byp_lut_read(struct osi_core_priv_data *const osi_core, struct osi_macsec_lut_config *const lut_config) { nveu32_t lut_data[MACSEC_LUT_DATA_REG_CNT] = {0}; - nveu32_t flags = 0, val = 0; + nveu32_t flags = 0; + nveu32_t val = 0; nveu32_t index = lut_config->table_config.index; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu8_t *paddr = OSI_NULL; @@ -1398,7 +1419,8 @@ static nve32_t byp_lut_read(struct osi_core_priv_data *const osi_core, if (lut_read_inputs(lut_config, lut_data) != 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "LUT inputs error\n", 0ULL); - return -1; + ret = -1; + goto err; } /* Lookup output */ @@ -1437,6 +1459,7 @@ 
static nve32_t byp_lut_read(struct osi_core_priv_data *const osi_core, } lut_config->flags |= flags; } +err: return ret; } @@ -1540,7 +1563,8 @@ static nve32_t sci_lut_read(struct osi_core_priv_data *const osi_core, nve32_t ret = 0; if (index > OSI_SC_LUT_MAX_INDEX) { - return -1; + ret = -1; + goto exit; } read_lut_data(osi_core, lut_data); @@ -1549,7 +1573,8 @@ static nve32_t sci_lut_read(struct osi_core_priv_data *const osi_core, if (lut_read_inputs(lut_config, lut_data) != 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "LUT inputs error\n", 0ULL); - return -1; + ret = -1; + goto exit; } tx_sci_lut_read(osi_core, lut_config, lut_data); break; @@ -1584,8 +1609,7 @@ static nve32_t sci_lut_read(struct osi_core_priv_data *const osi_core, ret = -1; break; } - - /* Lookup output */ +exit: return ret; } @@ -1654,7 +1678,6 @@ static nve32_t sc_param_lut_read(struct osi_core_priv_data *const osi_core, break; } - /* Lookup output */ return ret; } @@ -1922,7 +1945,7 @@ static void tx_sa_state_lut_config(const struct osi_macsec_lut_config *const lut * @retval -1 on failure */ static nve32_t sa_state_lut_config(struct osi_core_priv_data *const osi_core, - struct osi_macsec_lut_config *const lut_config) + const struct osi_macsec_lut_config *const lut_config) { nveu32_t lut_data[MACSEC_LUT_DATA_REG_CNT] = {0}; struct osi_macsec_table_config table_config = lut_config->table_config; @@ -2083,7 +2106,7 @@ static void tx_sc_param_lut_config( * @retval -1 on failure */ static nve32_t sc_param_lut_config(struct osi_core_priv_data *const osi_core, - struct osi_macsec_lut_config *const lut_config) + const struct osi_macsec_lut_config *const lut_config) { nveu32_t lut_data[MACSEC_LUT_DATA_REG_CNT] = {0}; struct osi_macsec_table_config table_config = lut_config->table_config; @@ -2093,7 +2116,8 @@ static nve32_t sc_param_lut_config(struct osi_core_priv_data *const osi_core, if (entry.key_index_start > OSI_KEY_INDEX_MAX) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Invalid 
Key Index\n", 0ULL); - return -1; + ret = -1; + goto exit; } switch (table_config.ctlr_sel) { @@ -2111,7 +2135,7 @@ static nve32_t sc_param_lut_config(struct osi_core_priv_data *const osi_core, } commit_lut_data(osi_core, lut_data); - +exit: return ret; } @@ -2491,18 +2515,20 @@ static void lut_config_preempt_mask(const struct osi_macsec_lut_config *const lu * @retval 0 on success * @retval -1 on failure */ -static nve32_t lut_config_inputs(struct osi_macsec_lut_config *const lut_config, +static nve32_t lut_config_inputs(const struct osi_macsec_lut_config *const lut_config, nveu32_t *const lut_data) { struct osi_lut_inputs entry = lut_config->lut_in; nveu32_t flags = lut_config->flags; nveu32_t i, j = OSI_LUT_FLAGS_BYTE0_PATTERN_VALID; + nve32_t ret = 0; for (i = 0; i < OSI_LUT_BYTE_PATTERN_MAX; i++) { if ((flags & j) == j) { if (entry.byte_pattern_offset[i] > OSI_LUT_BYTE_PATTERN_MAX_OFFSET) { - return -1; + ret = -1; + goto exit; } } j <<= 1; @@ -2512,14 +2538,16 @@ static nve32_t lut_config_inputs(struct osi_macsec_lut_config *const lut_config, OSI_LUT_FLAGS_BYTE0_PATTERN_VALID) { if (entry.byte_pattern_offset[0] > OSI_LUT_BYTE_PATTERN_MAX_OFFSET) { - return -1; + ret = -1; + goto exit; } } if ((flags & OSI_LUT_FLAGS_VLAN_VALID) == OSI_LUT_FLAGS_VLAN_VALID) { if ((entry.vlan_pcp > OSI_VLAN_PCP_MAX) || (entry.vlan_id > OSI_VLAN_ID_MAX)) { - return -1; + ret = -1; + goto exit; } } @@ -2529,8 +2557,8 @@ static nve32_t lut_config_inputs(struct osi_macsec_lut_config *const lut_config, lut_config_vlan(lut_config, lut_data); lut_config_byte_pattern(lut_config, lut_data); lut_config_preempt_mask(lut_config, lut_data); - - return 0; +exit: + return ret; } /** @@ -2563,9 +2591,11 @@ static nve32_t rx_sci_lut_config( { nveu32_t flags = lut_config->flags; struct osi_sci_lut_outputs entry = lut_config->sci_lut_out; + nve32_t ret = 0; if (entry.sc_index > OSI_SC_INDEX_MAX) { - return -1; + ret = -1; + goto exit; } lut_data[0] |= ((nveu32_t)(entry.sci[0]) | @@ -2591,12 
+2621,12 @@ static nve32_t rx_sci_lut_config( } lut_data[2] |= entry.sc_index << 10; - - return 0; +exit: + return ret; } /** - * @brief rx_sci_lut_config - update lut_data from lut_config for sci_lut + * @brief tx_sci_lut_config - update lut_data from lut_config for sci_lut * * @note * Algorithm: @@ -2621,15 +2651,17 @@ static nve32_t rx_sci_lut_config( * @retval -1 on failure */ static nve32_t tx_sci_lut_config( - struct osi_macsec_lut_config *const lut_config, + const struct osi_macsec_lut_config *const lut_config, nveu32_t *const lut_data) { nveu32_t flags = lut_config->flags; struct osi_sci_lut_outputs entry = lut_config->sci_lut_out; nveu32_t an_valid = entry.an_valid; + nve32_t ret = 0; if (lut_config_inputs(lut_config, lut_data) != 0) { - return -1; + ret = -1; + goto exit; } /* Lookup result fields */ @@ -2656,7 +2688,8 @@ static nve32_t tx_sci_lut_config( OSI_LUT_FLAGS_DVLAN_OUTER_INNER_TAG_SEL) { lut_data[6] |= MACSEC_TX_SCI_LUT_DVLAN_OUTER_INNER_TAG_SEL; } - return 0; +exit: + return ret; } /** @@ -2686,7 +2719,7 @@ static nve32_t tx_sci_lut_config( * @retval -1 on failure */ static nve32_t sci_lut_config(struct osi_core_priv_data *const osi_core, - struct osi_macsec_lut_config *const lut_config) + const struct osi_macsec_lut_config *const lut_config) { nveu32_t lut_data[MACSEC_LUT_DATA_REG_CNT] = {0}; struct osi_macsec_table_config table_config = lut_config->table_config; @@ -2700,7 +2733,8 @@ static nve32_t sci_lut_config(struct osi_core_priv_data *const osi_core, (lut_config->table_config.index > OSI_SC_LUT_MAX_INDEX)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "SCI LUT config err - Invalid Index\n", 0ULL); - return -1; + ret = -1; + goto exit; } switch (table_config.ctlr_sel) { @@ -2708,7 +2742,8 @@ static nve32_t sci_lut_config(struct osi_core_priv_data *const osi_core, if (tx_sci_lut_config(lut_config, lut_data) < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to config tx sci LUT\n", 0ULL); - return -1; + ret = -1; + goto 
exit; } commit_lut_data(osi_core, lut_data); @@ -2732,7 +2767,8 @@ static nve32_t sci_lut_config(struct osi_core_priv_data *const osi_core, if (rx_sci_lut_config(lut_config, lut_data) < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to config rx sci LUT\n", 0ULL); - return -1; + ret = -1; + goto exit; } commit_lut_data(osi_core, lut_data); @@ -2758,6 +2794,7 @@ static nve32_t sci_lut_config(struct osi_core_priv_data *const osi_core, ret = -1; break; } +exit: return ret; } @@ -2787,7 +2824,7 @@ static nve32_t sci_lut_config(struct osi_core_priv_data *const osi_core, * @retval -1 on failure */ static nve32_t byp_lut_config(struct osi_core_priv_data *const osi_core, - struct osi_macsec_lut_config *const lut_config) + const struct osi_macsec_lut_config *const lut_config) { nveu32_t lut_data[MACSEC_LUT_DATA_REG_CNT] = {0}; nveu32_t flags = lut_config->flags; @@ -2799,7 +2836,8 @@ static nve32_t byp_lut_config(struct osi_core_priv_data *const osi_core, if (lut_config_inputs(lut_config, lut_data) != 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "LUT inputs error\n", 0ULL); - return -1; + ret = -1; + goto exit; } /* Lookup output */ @@ -2860,7 +2898,7 @@ static nve32_t byp_lut_config(struct osi_core_priv_data *const osi_core, ret = -1; break; } - +exit: return ret; } @@ -2888,7 +2926,7 @@ static nve32_t byp_lut_config(struct osi_core_priv_data *const osi_core, * @retval -1 on failure */ static inline nve32_t lut_data_write(struct osi_core_priv_data *const osi_core, - struct osi_macsec_lut_config *const lut_config) + const struct osi_macsec_lut_config *const lut_config) { nve32_t ret = 0; @@ -2942,19 +2980,23 @@ static inline nve32_t lut_data_write(struct osi_core_priv_data *const osi_core, */ static nve32_t validate_lut_conf(const struct osi_macsec_lut_config *const lut_config) { + nve32_t ret = 0; + /* Validate LUT config */ if ((lut_config->table_config.ctlr_sel > OSI_CTLR_SEL_MAX) || (lut_config->table_config.rw > OSI_RW_MAX) || 
(lut_config->table_config.index > OSI_TABLE_INDEX_MAX) || (lut_config->lut_sel > OSI_LUT_SEL_MAX)) { - LOG("Validating LUT config failed. ctrl: %hu," + MACSEC_LOG("Validating LUT config failed. ctrl: %hu," " rw: %hu, index: %hu, lut_sel: %hu", lut_config->table_config.ctlr_sel, lut_config->table_config.rw, lut_config->table_config.index, lut_config->lut_sel); - return -1; + ret = -1; + goto exit; } - return 0; +exit: + return ret; } /** @@ -2994,13 +3036,14 @@ static nve32_t macsec_lut_config(struct osi_core_priv_data *const osi_core, nveu8_t *base = (nveu8_t *)osi_core->macsec_base; if (validate_lut_conf(lut_config) < 0) { - return -1; + ret = -1; + goto exit; } /* Wait for previous LUT update to finish */ ret = poll_for_lut_update(osi_core); if (ret < 0) { - return ret; + goto exit; } lut_config_reg = osi_readla(osi_core, base + MACSEC_LUT_CONFIG); @@ -3015,7 +3058,7 @@ static nve32_t macsec_lut_config(struct osi_core_priv_data *const osi_core, /* For write operation, load the lut_data registers */ ret = lut_data_write(osi_core, lut_config); if (ret < 0) { - return ret; + goto exit; } } else { lut_config_reg &= ~MACSEC_LUT_CONFIG_RW; @@ -3034,17 +3077,17 @@ static nve32_t macsec_lut_config(struct osi_core_priv_data *const osi_core, /* Wait for this LUT update to finish */ ret = poll_for_lut_update(osi_core); if (ret < 0) { - return ret; + goto exit; } if (lut_config->table_config.rw == OSI_NONE) { ret = lut_data_read(osi_core, lut_config); if (ret < 0) { - return ret; + goto exit; } } - - return 0; +exit: + return ret; } /** @@ -3072,7 +3115,7 @@ static inline void handle_rx_sc_invalid_key( nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu32_t clear = 0; - LOG("%s()\n", __func__); + MACSEC_LOG("%s()\n", __func__); /** check which SC/AN had triggered and clear */ /* rx_sc0_7 */ @@ -3108,7 +3151,7 @@ static inline void handle_tx_sc_invalid_key( nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nveu32_t clear = 0; - LOG("%s()\n", __func__); + 
MACSEC_LOG("%s()\n", __func__); /** check which SC/AN had triggered and clear */ /* tx_sc0_7 */ @@ -3141,9 +3184,10 @@ static inline void handle_tx_sc_invalid_key( static inline void handle_safety_err_irq( const struct osi_core_priv_data *const osi_core) { + (void) osi_core; OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, "Safety Error Handler \n", 0ULL); - LOG("%s()\n", __func__); + MACSEC_LOG("%s()\n", __func__); } /** @@ -3417,7 +3461,7 @@ static inline void handle_tx_irq(struct osi_core_priv_data *const osi_core) #endif tx_isr = osi_readla(osi_core, addr + MACSEC_TX_ISR); - LOG("%s(): tx_isr 0x%x\n", __func__, tx_isr); + MACSEC_LOG("%s(): tx_isr 0x%x\n", __func__, tx_isr); if ((tx_isr & MACSEC_TX_DBG_BUF_CAPTURE_DONE) == MACSEC_TX_DBG_BUF_CAPTURE_DONE) { handle_dbg_evt_capture_done(osi_core, OSI_CTLR_SEL_TX); @@ -3509,7 +3553,8 @@ static inline void handle_tx_irq(struct osi_core_priv_data *const osi_core) */ static inline void handle_rx_irq(struct osi_core_priv_data *const osi_core) { - nveu32_t rx_isr, clear = 0; + nveu32_t rx_isr; + nveu32_t clear = 0; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; #ifdef HSI_SUPPORT nveu64_t rx_crc_err = 0; @@ -3517,7 +3562,7 @@ static inline void handle_rx_irq(struct osi_core_priv_data *const osi_core) #endif rx_isr = osi_readla(osi_core, addr + MACSEC_RX_ISR); - LOG("%s(): rx_isr 0x%x\n", __func__, rx_isr); + MACSEC_LOG("%s(): rx_isr 0x%x\n", __func__, rx_isr); if ((rx_isr & MACSEC_RX_DBG_BUF_CAPTURE_DONE) == MACSEC_RX_DBG_BUF_CAPTURE_DONE) { @@ -3616,15 +3661,23 @@ static inline void handle_rx_irq(struct osi_core_priv_data *const osi_core) */ static inline void handle_common_irq(struct osi_core_priv_data *const osi_core) { - nveu32_t common_isr, clear = 0; + nveu32_t common_isr; + nveu32_t clear = 0; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; common_isr = osi_readla(osi_core, addr + MACSEC_COMMON_ISR); - LOG("%s(): common_isr 0x%x\n", __func__, common_isr); + MACSEC_LOG("%s(): common_isr 0x%x\n", __func__, 
common_isr); if ((common_isr & MACSEC_SECURE_REG_VIOL) == MACSEC_SECURE_REG_VIOL) { CERT_C__POST_INC__U64(osi_core->macsec_irq_stats.secure_reg_viol); clear |= MACSEC_SECURE_REG_VIOL; +#ifdef HSI_SUPPORT + if (osi_core->hsi.enabled == OSI_ENABLE) { + osi_core->hsi.macsec_err_code[MACSEC_REG_VIOL_ERR_IDX] = + OSI_MACSEC_REG_VIOL_ERR; + osi_core->hsi.macsec_report_err = OSI_ENABLE; + } +#endif } if ((common_isr & MACSEC_RX_UNINIT_KEY_SLOT) == @@ -3656,7 +3709,7 @@ static inline void handle_common_irq(struct osi_core_priv_data *const osi_core) } /** - * @brief macsec_handle_ns_irq - Non-secure interrupt handler + * @brief macsec_handle_irq - Macsec interrupt handler * * @note * Algorithm: @@ -3678,13 +3731,13 @@ static inline void handle_common_irq(struct osi_core_priv_data *const osi_core) * - Run time: Yes * - De-initialization: No */ -static void macsec_handle_ns_irq(struct osi_core_priv_data *const osi_core) +static void macsec_handle_irq(struct osi_core_priv_data *const osi_core) { nveu32_t irq_common_sr, common_isr; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; irq_common_sr = osi_readla(osi_core, addr + MACSEC_INTERRUPT_COMMON_SR); - LOG("%s(): common_sr 0x%x\n", __func__, irq_common_sr); + MACSEC_LOG("%s(): common_sr 0x%x\n", __func__, irq_common_sr); if ((irq_common_sr & MACSEC_COMMON_SR_TX) == MACSEC_COMMON_SR_TX) { handle_tx_irq(osi_core); } @@ -3704,38 +3757,6 @@ static void macsec_handle_ns_irq(struct osi_core_priv_data *const osi_core) } } -/** - * @brief macsec_handle_s_irq - secure interrupt handler - * - * @note - * Algorithm: - * - Handles common interrupts - * - Refer to MACSEC column of <<******, (sequence diagram)>> for API details. - * - TraceID: *********** - * - * @param[in] osi_core: OSI core private data structure. used param macsec_base - * - * @pre MACSEC needs to be out of reset and proper clock configured. 
- * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -static void macsec_handle_s_irq(struct osi_core_priv_data *const osi_core) -{ - nveu32_t common_isr; - nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; - - LOG("%s()\n", __func__); - - common_isr = osi_readla(osi_core, addr + MACSEC_COMMON_ISR); - if (common_isr != OSI_NONE) { - handle_common_irq(osi_core); - } -} - /** * @brief macsec_cipher_config - Configures the cipher type * @@ -3764,6 +3785,7 @@ static nve32_t macsec_cipher_config(struct osi_core_priv_data *const osi_core, { nveu8_t *base = (nveu8_t *)osi_core->macsec_base; nveu32_t val; + nve32_t ret = 0; val = osi_readla(osi_core, base + MACSEC_GCM_AES_CONTROL_0); @@ -3776,13 +3798,16 @@ static nve32_t macsec_cipher_config(struct osi_core_priv_data *const osi_core, val |= MACSEC_TX_AES_MODE_AES256; val |= MACSEC_RX_AES_MODE_AES256; } else { - return -1; + ret = -1; + goto exit; } osi_writela(osi_core, val, base + MACSEC_GCM_AES_CONTROL_0); - return 0; +exit: + return ret; } +#ifdef DEBUG_MACSEC /** * @brief macsec_loopback_config - Configures the loopback mode * @@ -3812,6 +3837,7 @@ static nve32_t macsec_loopback_config( { nveu8_t *base = (nveu8_t *)osi_core->macsec_base; nveu32_t val; + nve32_t ret = 0; val = osi_readla(osi_core, base + MACSEC_CONTROL1); @@ -3820,12 +3846,15 @@ static nve32_t macsec_loopback_config( } else if (enable == OSI_DISABLE) { val &= ~MACSEC_LOOPBACK_MODE_EN; } else { - return -1; + ret = -1; + goto exit; } osi_writela(osi_core, val, base + MACSEC_CONTROL1); - return 0; +exit: + return ret; } +#endif /* DEBUG_MACSEC */ /** * @brief clear_byp_lut - Clears the bypass lut @@ -3867,11 +3896,11 @@ static nve32_t clear_byp_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Error clearing CTLR:BYPASS LUT:INDEX: \n", j); - return ret; + goto exit; } } } - +exit: return ret; } @@ -3915,10 +3944,11 @@ static nve32_t 
clear_sci_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Error clearing CTLR:SCI LUT:INDEX: \n", j); - return ret; + goto exit; } } } +exit: return ret; } @@ -3962,10 +3992,11 @@ static nve32_t clear_sc_param_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Error clearing CTLR:SC PARAM LUT:INDEX: \n", j); - return ret; + goto exit; } } } +exit: return ret; } @@ -4010,10 +4041,11 @@ static nve32_t clear_sc_state_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Error clearing CTLR:SC STATE LUT:INDEX: \n", j); - return ret; + goto exit; } } } +exit: return ret; } @@ -4057,7 +4089,7 @@ static nve32_t clear_sa_state_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Error clearing TX CTLR:SA STATE LUT:INDEX: \n", j); - return ret; + goto exit; } } @@ -4070,9 +4102,10 @@ static nve32_t clear_sa_state_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Error clearing RX CTLR:SA STATE LUT:INDEX: \n", j); - return ret; + goto exit; } } +exit: return ret; } @@ -4117,23 +4150,23 @@ static nve32_t clear_lut(struct osi_core_priv_data *const osi_core) /* Clear all the LUT's which have a dedicated LUT valid bit per entry */ ret = clear_byp_lut(osi_core); if (ret < 0) { - return ret; + goto exit; } ret = clear_sci_lut(osi_core); if (ret < 0) { - return ret; + goto exit; } ret = clear_sc_param_lut(osi_core); if (ret < 0) { - return ret; + goto exit; } ret = clear_sc_state_lut(osi_core); if (ret < 0) { - return ret; + goto exit; } ret = clear_sa_state_lut(osi_core); if (ret < 0) { - return ret; + goto exit; } #ifdef MACSEC_KEY_PROGRAM @@ -4148,12 +4181,12 @@ static nve32_t clear_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, 
OSI_LOG_ARG_HW_FAIL, "Error clearing KT LUT:INDEX: \n", j); - return ret; + goto exit; } } } #endif /* MACSEC_KEY_PROGRAM */ - +exit: return ret; } @@ -4182,13 +4215,16 @@ static nve32_t clear_lut(struct osi_core_priv_data *const osi_core) static nve32_t macsec_deinit(struct osi_core_priv_data *const osi_core) { nveu32_t i; +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) const struct core_local *l_core = (void *)osi_core; +#endif for (i = OSI_CTLR_SEL_TX; i <= OSI_CTLR_SEL_RX; i++) { osi_memset(&osi_core->macsec_lut_status[i], OSI_NONE, sizeof(struct osi_macsec_lut_status)); } +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) /* Update MAC as per macsec requirement */ if (l_core->ops_p->macsec_config_mac != OSI_NULL) { l_core->ops_p->macsec_config_mac(osi_core, OSI_DISABLE); @@ -4196,6 +4232,7 @@ static nve32_t macsec_deinit(struct osi_core_priv_data *const osi_core) OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed config MAC per macsec\n", 0ULL); } +#endif return 0; } @@ -4228,28 +4265,30 @@ static nve32_t macsec_update_mtu(struct osi_core_priv_data *const osi_core, { nveu32_t val = 0; nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; + nve32_t ret = 0; if (mtu > OSI_MAX_MTU_SIZE) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Invalid MTU received!!\n", mtu); - return -1; + ret = -1; + goto exit; } /* Set MTU */ val = osi_readla(osi_core, addr + MACSEC_TX_MTU_LEN); - LOG("Read MACSEC_TX_MTU_LEN: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_TX_MTU_LEN: 0x%x\n", val); val &= ~(MTU_LENGTH_MASK); val |= (mtu & MTU_LENGTH_MASK); - LOG("Write MACSEC_TX_MTU_LEN: 0x%x\n", val); + MACSEC_LOG("Write MACSEC_TX_MTU_LEN: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_TX_MTU_LEN); val = osi_readla(osi_core, addr + MACSEC_RX_MTU_LEN); - LOG("Read MACSEC_RX_MTU_LEN: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_RX_MTU_LEN: 0x%x\n", val); val &= ~(MTU_LENGTH_MASK); val |= (mtu & MTU_LENGTH_MASK); - LOG("Write MACSEC_RX_MTU_LEN: 0x%x\n", val); + 
MACSEC_LOG("Write MACSEC_RX_MTU_LEN: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_RX_MTU_LEN); - - return 0; +exit: + return ret; } /** @@ -4304,9 +4343,9 @@ static nve32_t set_byp_lut(struct osi_core_priv_data *const osi_core) if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to set BYP for BC addr\n", (nveul64_t)ret); - return ret; + goto exit; } else { - osi_core->macsec_lut_status[i].next_byp_idx = + osi_core->macsec_lut_status[i].next_byp_idx = (nveu16_t ) ((osi_core->macsec_lut_status[i].next_byp_idx & 0xFFU) + 1U); } } @@ -4324,17 +4363,91 @@ static nve32_t set_byp_lut(struct osi_core_priv_data *const osi_core) OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to set BYP for MKPDU multicast DA\n", (nveul64_t)ret); - return ret; + goto exit; } else { - osi_core->macsec_lut_status[i].next_byp_idx = + osi_core->macsec_lut_status[i].next_byp_idx = (nveu16_t ) ((osi_core->macsec_lut_status[i].next_byp_idx & 0xFFU) + 1U); } } - return 0; +exit: + return ret; } +#ifdef DEBUG_MACSEC +static void macsec_intr_config(struct osi_core_priv_data *const osi_core, nveu32_t enable) +{ + nveu32_t val = 0; + nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; + + if (enable == OSI_ENABLE) { + val = osi_readla(osi_core, addr + MACSEC_TX_IMR); + MACSEC_LOG("Read MACSEC_TX_IMR: 0x%x\n", val); + val |= (MACSEC_TX_DBG_BUF_CAPTURE_DONE_INT_EN | + MACSEC_TX_MTU_CHECK_FAIL_INT_EN | + MACSEC_TX_SC_AN_NOT_VALID_INT_EN | + MACSEC_TX_AES_GCM_BUF_OVF_INT_EN | + MACSEC_TX_PN_EXHAUSTED_INT_EN | + MACSEC_TX_PN_THRSHLD_RCHD_INT_EN); + osi_writela(osi_core, val, addr + MACSEC_TX_IMR); + MACSEC_LOG("Write MACSEC_TX_IMR: 0x%x\n", val); + + val = osi_readla(osi_core, addr + MACSEC_RX_IMR); + MACSEC_LOG("Read MACSEC_RX_IMR: 0x%x\n", val); + + val |= (MACSEC_RX_DBG_BUF_CAPTURE_DONE_INT_EN | + RX_REPLAY_ERROR_INT_EN | + MACSEC_RX_MTU_CHECK_FAIL_INT_EN | + MACSEC_RX_AES_GCM_BUF_OVF_INT_EN | + MACSEC_RX_PN_EXHAUSTED_INT_EN + ); + osi_writela(osi_core, val, addr + 
MACSEC_RX_IMR); + MACSEC_LOG("Write MACSEC_RX_IMR: 0x%x\n", val); + + val = osi_readla(osi_core, addr + MACSEC_COMMON_IMR); + MACSEC_LOG("Read MACSEC_COMMON_IMR: 0x%x\n", val); + val |= (MACSEC_RX_UNINIT_KEY_SLOT_INT_EN | + MACSEC_RX_LKUP_MISS_INT_EN | + MACSEC_TX_UNINIT_KEY_SLOT_INT_EN | + MACSEC_TX_LKUP_MISS_INT_EN); + osi_writela(osi_core, val, addr + MACSEC_COMMON_IMR); + MACSEC_LOG("Write MACSEC_COMMON_IMR: 0x%x\n", val); + } else { + val = osi_readla(osi_core, addr + MACSEC_TX_IMR); + MACSEC_LOG("Read MACSEC_TX_IMR: 0x%x\n", val); + val &= (~MACSEC_TX_DBG_BUF_CAPTURE_DONE_INT_EN & + ~MACSEC_TX_MTU_CHECK_FAIL_INT_EN & + ~MACSEC_TX_SC_AN_NOT_VALID_INT_EN & + ~MACSEC_TX_AES_GCM_BUF_OVF_INT_EN & + ~MACSEC_TX_PN_EXHAUSTED_INT_EN & + ~MACSEC_TX_PN_THRSHLD_RCHD_INT_EN); + osi_writela(osi_core, val, addr + MACSEC_TX_IMR); + MACSEC_LOG("Write MACSEC_TX_IMR: 0x%x\n", val); + + val = osi_readla(osi_core, addr + MACSEC_RX_IMR); + MACSEC_LOG("Read MACSEC_RX_IMR: 0x%x\n", val); + val &= (~MACSEC_RX_DBG_BUF_CAPTURE_DONE_INT_EN & + ~RX_REPLAY_ERROR_INT_EN & + ~MACSEC_RX_MTU_CHECK_FAIL_INT_EN & + ~MACSEC_RX_AES_GCM_BUF_OVF_INT_EN & + ~MACSEC_RX_PN_EXHAUSTED_INT_EN + ); + osi_writela(osi_core, val, addr + MACSEC_RX_IMR); + MACSEC_LOG("Write MACSEC_RX_IMR: 0x%x\n", val); + + val = osi_readla(osi_core, addr + MACSEC_COMMON_IMR); + MACSEC_LOG("Read MACSEC_COMMON_IMR: 0x%x\n", val); + val &= (~MACSEC_RX_UNINIT_KEY_SLOT_INT_EN & + ~MACSEC_RX_LKUP_MISS_INT_EN & + ~MACSEC_TX_UNINIT_KEY_SLOT_INT_EN & + ~MACSEC_TX_LKUP_MISS_INT_EN); + osi_writela(osi_core, val, addr + MACSEC_COMMON_IMR); + MACSEC_LOG("Write MACSEC_COMMON_IMR: 0x%x\n", val); + } +} +#endif /* DEBUG_MACSEC */ + /** - * @brief macsec_init - Inititlizes macsec + * @brief macsec_initialize - Inititlizes macsec * * @note * Algorithm: @@ -4370,14 +4483,16 @@ static nve32_t set_byp_lut(struct osi_core_priv_data *const osi_core) * @retval 0 for success * @retval -1 for failure */ -static nve32_t macsec_init(struct 
osi_core_priv_data *const osi_core, - nveu32_t mtu) +static nve32_t macsec_initialize(struct osi_core_priv_data *const osi_core, nveu32_t mtu) { nveu32_t val = 0; +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) const struct core_local *l_core = (void *)osi_core; +#endif nveu8_t *addr = (nveu8_t *)osi_core->macsec_base; nve32_t ret = 0; +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) /* Update MAC value as per macsec requirement */ if (l_core->ops_p->macsec_config_mac != OSI_NULL) { l_core->ops_p->macsec_config_mac(osi_core, OSI_ENABLE); @@ -4385,11 +4500,11 @@ static nve32_t macsec_init(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to config mac per macsec\n", 0ULL); } - +#endif /* Set MTU */ ret = macsec_update_mtu(osi_core, mtu); if (ret < 0) { - return ret; + goto exit; } /* set TX/RX SOT, as SOT value different for eqos. @@ -4397,83 +4512,64 @@ static nve32_t macsec_init(struct osi_core_priv_data *const osi_core, */ if (osi_core->mac == OSI_MAC_HW_EQOS) { val = osi_readla(osi_core, addr + MACSEC_TX_SOT_DELAY); - LOG("Read MACSEC_TX_SOT_DELAY: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_TX_SOT_DELAY: 0x%x\n", val); val &= ~(SOT_LENGTH_MASK); val |= (EQOS_MACSEC_SOT_DELAY & SOT_LENGTH_MASK); - LOG("Write MACSEC_TX_SOT_DELAY: 0x%x\n", val); + MACSEC_LOG("Write MACSEC_TX_SOT_DELAY: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_TX_SOT_DELAY); val = osi_readla(osi_core, addr + MACSEC_RX_SOT_DELAY); - LOG("Read MACSEC_RX_SOT_DELAY: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_RX_SOT_DELAY: 0x%x\n", val); val &= ~(SOT_LENGTH_MASK); val |= (EQOS_MACSEC_SOT_DELAY & SOT_LENGTH_MASK); - LOG("Write MACSEC_RX_SOT_DELAY: 0x%x\n", val); + MACSEC_LOG("Write MACSEC_RX_SOT_DELAY: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_RX_SOT_DELAY); } /* Set essential MACsec control configuration */ val = osi_readla(osi_core, addr + MACSEC_CONTROL0); - LOG("Read MACSEC_CONTROL0: 0x%x\n", val); + 
MACSEC_LOG("Read MACSEC_CONTROL0: 0x%x\n", val); val |= (MACSEC_TX_LKUP_MISS_NS_INTR | MACSEC_RX_LKUP_MISS_NS_INTR | MACSEC_TX_LKUP_MISS_BYPASS | MACSEC_RX_LKUP_MISS_BYPASS); val &= ~(MACSEC_VALIDATE_FRAMES_MASK); val |= MACSEC_VALIDATE_FRAMES_STRICT; val |= MACSEC_RX_REPLAY_PROT_EN; - LOG("Write MACSEC_CONTROL0: 0x%x\n", val); + MACSEC_LOG("Write MACSEC_CONTROL0: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_CONTROL0); val = osi_readla(osi_core, addr + MACSEC_CONTROL1); - LOG("Read MACSEC_CONTROL1: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_CONTROL1: 0x%x\n", val); val |= (MACSEC_RX_MTU_CHECK_EN | MACSEC_TX_LUT_PRIO_BYP | MACSEC_TX_MTU_CHECK_EN); - LOG("Write MACSEC_CONTROL1: 0x%x\n", val); + MACSEC_LOG("Write MACSEC_CONTROL1: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_CONTROL1); val = osi_readla(osi_core, addr + MACSEC_STATS_CONTROL_0); - LOG("Read MACSEC_STATS_CONTROL_0: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_STATS_CONTROL_0: 0x%x\n", val); /* set STATS rollover bit */ val |= MACSEC_STATS_CONTROL0_CNT_RL_OVR_CPY; - LOG("Write MACSEC_STATS_CONTROL_0: 0x%x\n", val); + MACSEC_LOG("Write MACSEC_STATS_CONTROL_0: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_STATS_CONTROL_0); - /* Enable default interrupts needed */ + /* Enable default HSI related interrupts needed */ val = osi_readla(osi_core, addr + MACSEC_TX_IMR); - LOG("Read MACSEC_TX_IMR: 0x%x\n", val); - val |= (MACSEC_TX_DBG_BUF_CAPTURE_DONE_INT_EN | - MACSEC_TX_MTU_CHECK_FAIL_INT_EN | - MACSEC_TX_MAC_CRC_ERROR_INT_EN | - MACSEC_TX_SC_AN_NOT_VALID_INT_EN | - MACSEC_TX_AES_GCM_BUF_OVF_INT_EN | - MACSEC_TX_PN_EXHAUSTED_INT_EN | - MACSEC_TX_PN_THRSHLD_RCHD_INT_EN); - LOG("Write MACSEC_TX_IMR: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_TX_IMR: 0x%x\n", val); + val |= MACSEC_TX_MAC_CRC_ERROR_INT_EN; + MACSEC_LOG("Write MACSEC_TX_IMR: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_TX_IMR); /* set ICV error threshold to 1 */ osi_writela(osi_core, 1U, addr + 
MACSEC_RX_ICV_ERR_CNTRL); - + /* Enabling interrupts only related to HSI */ val = osi_readla(osi_core, addr + MACSEC_RX_IMR); - LOG("Read MACSEC_RX_IMR: 0x%x\n", val); - - val |= (MACSEC_RX_DBG_BUF_CAPTURE_DONE_INT_EN | - MACSEC_RX_ICV_ERROR_INT_EN | RX_REPLAY_ERROR_INT_EN | - MACSEC_RX_MTU_CHECK_FAIL_INT_EN | - MACSEC_RX_MAC_CRC_ERROR_INT_EN | - MACSEC_RX_AES_GCM_BUF_OVF_INT_EN | - MACSEC_RX_PN_EXHAUSTED_INT_EN - ); - LOG("Write MACSEC_RX_IMR: 0x%x\n", val); + MACSEC_LOG("Read MACSEC_RX_IMR: 0x%x\n", val); + val |= (MACSEC_RX_ICV_ERROR_INT_EN | + MACSEC_RX_MAC_CRC_ERROR_INT_EN); + MACSEC_LOG("Write MACSEC_RX_IMR: 0x%x\n", val); osi_writela(osi_core, val, addr + MACSEC_RX_IMR); val = osi_readla(osi_core, addr + MACSEC_COMMON_IMR); - LOG("Read MACSEC_COMMON_IMR: 0x%x\n", val); - - val |= (MACSEC_SECURE_REG_VIOL_INT_EN | - MACSEC_RX_UNINIT_KEY_SLOT_INT_EN | - MACSEC_RX_LKUP_MISS_INT_EN | - MACSEC_TX_UNINIT_KEY_SLOT_INT_EN | - MACSEC_TX_LKUP_MISS_INT_EN); - LOG("Write MACSEC_COMMON_IMR: 0x%x\n", val); + val |= MACSEC_SECURE_REG_VIOL_INT_EN; osi_writela(osi_core, val, addr + MACSEC_COMMON_IMR); /* Set AES mode @@ -4485,9 +4581,11 @@ static nve32_t macsec_init(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Invalidating all LUT's failed\n", (nveul64_t)ret); - return ret; + goto exit; } - return set_byp_lut(osi_core); + ret = set_byp_lut(osi_core); +exit: + return ret; } /** @@ -4521,16 +4619,17 @@ static struct osi_macsec_sc_info *find_existing_sc( { struct osi_macsec_lut_status *lut_status_ptr = &osi_core->macsec_lut_status[ctlr]; + struct osi_macsec_sc_info *sc_found = OSI_NULL; nveu32_t i; for (i = 0; i < OSI_MAX_NUM_SC; i++) { if (osi_memcmp(lut_status_ptr->sc_info[i].sci, sc->sci, (nve32_t)OSI_SCI_LEN) == OSI_NONE_SIGNED) { - return &lut_status_ptr->sc_info[i]; + sc_found = &lut_status_ptr->sc_info[i]; } } - return OSI_NULL; + return sc_found; } /** @@ -4564,7 +4663,7 @@ static nveu32_t 
get_avail_sc_idx(const struct osi_core_priv_data *const osi_core for (i = 0; i < OSI_MAX_NUM_SC; i++) { if (lut_status_ptr->sc_info[i].an_valid == OSI_NONE) { - return i; + break; } } return i; @@ -4609,24 +4708,28 @@ static nve32_t macsec_get_key_index(struct osi_core_priv_data *const osi_core, (ctlr > OSI_CTLR_SEL_MAX)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Params validation failed\n", 0ULL); - return -1; + ret = -1; + goto exit; } ret = osi_memcpy(sc.sci, sci, OSI_SCI_LEN); if (ret < OSI_NONE_SIGNED) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "memcpy failed\n", 0ULL); - return -1; + ret = -1; + goto exit; } sc_info = find_existing_sc(osi_core, &sc, ctlr); if (sc_info == OSI_NULL) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "SCI Not found\n", 0ULL); - return -1; + ret = -1; + goto exit; } *key_index = (sc_info->sc_idx_start * OSI_MAX_NUM_SA); - return 0; +exit: + return ret; } /** @@ -4670,7 +4773,7 @@ static nve32_t del_upd_sc(struct osi_core_priv_data *const osi_core, #endif /* MACSEC_KEY_PROGRAM */ struct osi_macsec_lut_config lut_config = {0}; struct osi_macsec_table_config *table_config; - nve32_t ret; + nve32_t ret = 0; /* All input/output fields are already zero'd in declaration. * Write all 0's to LUT index to clear everything @@ -4686,13 +4789,14 @@ static nve32_t del_upd_sc(struct osi_core_priv_data *const osi_core, if (existing_sc->curr_an == sc->curr_an) { /* 1. SCI LUT */ lut_config.lut_sel = OSI_LUT_SEL_SCI; - table_config->index = (nveu16_t)(existing_sc->sc_idx_start); + table_config->index = (nveu16_t)(existing_sc->sc_idx_start & 0xFFU); ret = macsec_lut_config(osi_core, &lut_config); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to del SCI LUT idx\n", sc->sc_idx_start); - return -1; + ret = -1; + goto exit; } /* 2. 
SC Param LUT */ @@ -4701,7 +4805,8 @@ static nve32_t del_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to del SC param\n", (nveul64_t)ret); - return -1; + ret = -1; + goto exit; } /* 3. SC state LUT */ @@ -4710,24 +4815,26 @@ static nve32_t del_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to del SC state\n", (nveul64_t)ret); - return -1; + ret = -1; + goto exit; } } /* 4. SA State LUT */ lut_config.lut_sel = OSI_LUT_SEL_SA_STATE; - table_config->index = (nveu16_t)((existing_sc->sc_idx_start * OSI_MAX_NUM_SA) + - sc->curr_an); + table_config->index = (nveu16_t)(((existing_sc->sc_idx_start * OSI_MAX_NUM_SA) + + sc->curr_an) & (0xFFU)); ret = macsec_lut_config(osi_core, &lut_config); if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to del SA state\n", (nveul64_t)ret); - return -1; + ret = -1; + goto exit; } /* Store key table index returned to osd */ - *kt_idx = (nveu16_t)((existing_sc->sc_idx_start * OSI_MAX_NUM_SA) + - sc->curr_an); + *kt_idx = (nveu16_t)(((existing_sc->sc_idx_start * OSI_MAX_NUM_SA) + + sc->curr_an) & (0xFFU)); #ifdef MACSEC_KEY_PROGRAM /* 5. 
Key LUT */ table_config = &kt_config.table_config; @@ -4739,13 +4846,14 @@ static nve32_t del_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to del SAK\n", (nveul64_t)ret); - return -1; + ret = -1; + goto exit; } #endif /* MACSEC_KEY_PROGRAM */ existing_sc->an_valid &= ~OSI_BIT(sc->curr_an); - - return 0; +exit: + return ret; } /** @@ -4771,6 +4879,7 @@ static nve32_t del_upd_sc(struct osi_core_priv_data *const osi_core, static void print_error(const struct osi_core_priv_data *const osi_core, nve32_t ret) { + (void) osi_core; if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to config macsec\n", (nveul64_t)ret); @@ -4808,6 +4917,87 @@ static void copy_rev_order(nveu8_t *dst_buff, const nveu8_t *src_buff, nveu16_t } } +/** + * @brief add_upd_sc_err_cleanup - Helper function to handle error conditions in add_upd_sc + * + * @note + * Algorithm: + * - Depending on the error_mask passed clear the LUTs + * - Refer to MACSEC column of <<******, (sequence diagram)>> for API details. + * - TraceID: *********** + * + * @param[in] osi_core: OSI core private data structure. used param macsec_base + * @param[in] mask: Error mask that indicate which LUTs need to be cleared + * @param[in] ctlr: Controller to be selected + * @param[in] sc: Pointer to the SC that was intended to be added + * + * @pre MACSEC needs to be out of reset and proper clock configured. 
+ * + * @note + * API Group: + * - Initialization: No + * - Run time: Yes + * - De-initialization: No + */ +static void add_upd_sc_err_cleanup(struct osi_core_priv_data *const osi_core, + nveu8_t mask, nveu16_t ctlr, + const struct osi_macsec_sc_info *const sc) +{ + struct osi_macsec_lut_config lut_config = {0}; + struct osi_macsec_table_config *table_config; + nve32_t ret_fail = 0; + nveu8_t error_mask = mask; + + if ((error_mask & OSI_BIT(3)) != OSI_NONE) { + /* Cleanup SCI LUT */ + error_mask &= ((~OSI_BIT(3)) & (0xFFU)); + osi_memset(&lut_config, 0, sizeof(lut_config)); + table_config = &lut_config.table_config; + table_config->ctlr_sel = ctlr; + table_config->rw = OSI_LUT_WRITE; + lut_config.lut_sel = OSI_LUT_SEL_SCI; + table_config->index = (nveu16_t)(sc->sc_idx_start & 0xFFU); + ret_fail = macsec_lut_config(osi_core, &lut_config); + print_error(osi_core, ret_fail); + } + if ((error_mask & OSI_BIT(2)) != OSI_NONE) { + /* cleanup SC param */ + error_mask &= ((~OSI_BIT(2)) & (0xFFU)); + osi_memset(&lut_config, 0, sizeof(lut_config)); + table_config = &lut_config.table_config; + table_config->ctlr_sel = ctlr; + lut_config.lut_sel = OSI_LUT_SEL_SC_PARAM; + table_config->index = (nveu16_t)(sc->sc_idx_start & 0xFFU); + ret_fail = macsec_lut_config(osi_core, &lut_config); + print_error(osi_core, ret_fail); + } + if ((error_mask & OSI_BIT(1)) != OSI_NONE) { + /* Cleanup SA state LUT */ + error_mask &= ((~OSI_BIT(1)) & (0xFFU)); + osi_memset(&lut_config, 0, sizeof(lut_config)); + table_config = &lut_config.table_config; + table_config->ctlr_sel = ctlr; + table_config->rw = OSI_LUT_WRITE; + lut_config.lut_sel = OSI_LUT_SEL_SA_STATE; + table_config->index = (nveu16_t)(((sc->sc_idx_start & 0xFU) * + OSI_MAX_NUM_SA) + sc->curr_an); + ret_fail = macsec_lut_config(osi_core, &lut_config); + print_error(osi_core, ret_fail); + } +#ifdef MACSEC_KEY_PROGRAM + if ((error_mask & OSI_BIT(0)) != OSI_NONE) { + error_mask &= ((~OSI_BIT(0)) & (0xFFU)); + osi_memset(&kt_config, 0, 
sizeof(kt_config)); + table_config = &kt_config.table_config; + table_config->ctlr_sel = ctlr; + table_config->rw = OSI_LUT_WRITE; + table_config->index = *kt_idx; + ret_fail = macsec_kt_config(osi_core, &kt_config); + print_error(osi_core, ret_fail); + } +#endif /* MACSEC_KEY_PROGRAM */ +} + /** * @brief add_upd_sc - add or update an SC * @@ -4823,7 +5013,7 @@ static void copy_rev_order(nveu8_t *dst_buff, const nveu8_t *src_buff, nveu16_t * - TraceID: *********** * * @param[in] osi_core: OSI core private data structure. used param macsec_base - * @param[in] existing_sc: Pointer to the existing sc + * @param[in] sc: Pointer to the existing sc * @param[in] ctlr: Controller to be selected * @param[out] kt_idx: Key index to be passed to osd * @@ -4839,13 +5029,14 @@ static void copy_rev_order(nveu8_t *dst_buff, const nveu8_t *src_buff, nveu16_t * @retval -1 on failure */ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, - struct osi_macsec_sc_info *const sc, - nveu16_t ctlr, nveu16_t *kt_idx) + const struct osi_macsec_sc_info *const sc, + nveu16_t ctlr, nveu16_t *kt_idx) { struct osi_macsec_lut_config lut_config = {0}; struct osi_macsec_table_config *table_config; - nve32_t ret; + nve32_t ret = 0; nveu32_t i; + nveu8_t error_mask = 0; #ifdef MACSEC_KEY_PROGRAM struct osi_macsec_kt_config kt_config = {0}; #endif /* MACSEC_KEY_PROGRAM */ @@ -4870,7 +5061,8 @@ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to set SAK\n", (nveul64_t)ret); - return -1; + ret = -1; + goto exit; } } #endif /* MACSEC_KEY_PROGRAM */ @@ -4890,13 +5082,14 @@ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to set SA state\n", (nveul64_t)ret); - goto err_sa_state; + error_mask |= OSI_BIT(0); + goto exit; } /* 3. 
SC param LUT */ lut_config.flags = OSI_NONE; lut_config.lut_sel = OSI_LUT_SEL_SC_PARAM; - table_config->index = (nveu16_t)(sc->sc_idx_start); + table_config->index = (nveu16_t)(sc->sc_idx_start & 0xFFU); copy_rev_order(lut_config.sc_param_out.sci, sc->sci, OSI_SCI_LEN); lut_config.sc_param_out.key_index_start = ((sc->sc_idx_start & 0xFU) * @@ -4910,7 +5103,8 @@ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to set SC param\n", (nveul64_t)ret); - goto err_sc_param; + error_mask |= OSI_BIT(1); + goto exit; } /* 4. SCI LUT */ @@ -4931,7 +5125,8 @@ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to set SCI LUT\n", (nveul64_t)ret); - goto err_sci; + error_mask |= OSI_BIT(2); + goto exit; } if (sc->flags == OSI_ENABLE_SA) { @@ -4944,56 +5139,13 @@ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to set SC state\n", (nveul64_t)ret); - goto err_sc_state; + error_mask |= OSI_BIT(3); + goto exit; } } - return 0; - -err_sc_state: - /* Cleanup SCI LUT */ - osi_memset(&lut_config, 0, sizeof(lut_config)); - table_config = &lut_config.table_config; - table_config->ctlr_sel = ctlr; - table_config->rw = OSI_LUT_WRITE; - lut_config.lut_sel = OSI_LUT_SEL_SCI; - table_config->index = (nveu16_t)(sc->sc_idx_start); - ret = macsec_lut_config(osi_core, &lut_config); - print_error(osi_core, ret); - -err_sci: - /* cleanup SC param */ - osi_memset(&lut_config, 0, sizeof(lut_config)); - table_config = &lut_config.table_config; - table_config->ctlr_sel = ctlr; - lut_config.lut_sel = OSI_LUT_SEL_SC_PARAM; - table_config->index = (nveu16_t)(sc->sc_idx_start); - ret = macsec_lut_config(osi_core, &lut_config); - print_error(osi_core, ret); - -err_sc_param: - /* Cleanup SA state LUT */ - osi_memset(&lut_config, 0, 
sizeof(lut_config)); - table_config = &lut_config.table_config; - table_config->ctlr_sel = ctlr; - table_config->rw = OSI_LUT_WRITE; - lut_config.lut_sel = OSI_LUT_SEL_SA_STATE; - table_config->index = (nveu16_t)(((sc->sc_idx_start & 0xFU) * - OSI_MAX_NUM_SA) + sc->curr_an); - ret = macsec_lut_config(osi_core, &lut_config); - print_error(osi_core, ret); - -err_sa_state: -#ifdef MACSEC_KEY_PROGRAM - osi_memset(&kt_config, 0, sizeof(kt_config)); - table_config = &kt_config.table_config; - table_config->ctlr_sel = ctlr; - table_config->rw = OSI_LUT_WRITE; - table_config->index = *kt_idx; - ret = macsec_kt_config(osi_core, &kt_config); - print_error(osi_core, ret); -#endif /* MACSEC_KEY_PROGRAM */ - - return -1; +exit: + add_upd_sc_err_cleanup(osi_core, error_mask, ctlr, sc); + return ret; } /** @@ -5023,13 +5175,15 @@ static nve32_t add_upd_sc(struct osi_core_priv_data *const osi_core, static nve32_t macsec_config_validate_inputs(nveu32_t enable, nveu16_t ctlr, const nveu16_t *kt_idx) { + nve32_t ret = 0; + /* Validate inputs */ if (((enable != OSI_ENABLE) && (enable != OSI_DISABLE)) || ((ctlr != OSI_CTLR_SEL_TX) && (ctlr != OSI_CTLR_SEL_RX)) || (kt_idx == OSI_NULL)) { - return -1; + ret = -1; } - return 0; + return ret; } /** @@ -5122,21 +5276,24 @@ static nve32_t add_new_sc(struct osi_core_priv_data *const osi_core, if (lut_status_ptr->num_of_sc_used >= OSI_MAX_NUM_SC) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Err: Reached max SC LUT entries!\n", 0ULL); - return -1; + ret = -1; + goto exit; } avail_sc_idx = get_avail_sc_idx(osi_core, ctlr); if (avail_sc_idx == OSI_MAX_NUM_SC) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Err: NO free SC Index\n", 0ULL); - return -1; + ret = -1; + goto exit; } new_sc = &lut_status_ptr->sc_info[avail_sc_idx]; ret = memcpy_sci_sak_hkey(new_sc, sc); if (ret < OSI_NONE_SIGNED) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "memcpy Failed\n", 0ULL); - return -1; + ret = -1; + goto exit; } new_sc->curr_an = sc->curr_an; 
new_sc->next_pn = sc->next_pn; @@ -5150,20 +5307,22 @@ static nve32_t add_new_sc(struct osi_core_priv_data *const osi_core, OSI_NONE_SIGNED) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "failed to add new SC\n", 0ULL); - return -1; + ret = -1; + goto exit; } else { /* Update lut status */ lut_status_ptr->num_of_sc_used++; - LOG("%s: Added new SC ctlr: %u " + MACSEC_LOG("%s: Added new SC ctlr: %u " "Total active SCs: %u", __func__, ctlr, lut_status_ptr->num_of_sc_used); - return 0; } +exit: + return ret; } /** - * @brief config_macsec - API to update LUTs for addition/deletion of SC/SA + * @brief macsec_configure - API to update LUTs for addition/deletion of SC/SA * * @note * Algorithm: @@ -5180,6 +5339,7 @@ static nve32_t add_new_sc(struct osi_core_priv_data *const osi_core, * * @param[in] osi_core: OSI core private data structure. used param macsec_base * @param[in] sc: Pointer to the sc that need to be added/deleted/updated + * @param[in] enable: enable or disable * @param[in] ctlr: Controller to be selected * @param[out] kt_idx: Key index to be passed to osd * @@ -5194,21 +5354,22 @@ static nve32_t add_new_sc(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure */ -static nve32_t config_macsec(struct osi_core_priv_data *const osi_core, - struct osi_macsec_sc_info *const sc, - nveu32_t enable, nveu16_t ctlr, - nveu16_t *kt_idx) +static nve32_t macsec_configure(struct osi_core_priv_data *const osi_core, + struct osi_macsec_sc_info *const sc, + nveu32_t enable, nveu16_t ctlr, + nveu16_t *kt_idx) { struct osi_macsec_sc_info *existing_sc = OSI_NULL; struct osi_macsec_sc_info tmp_sc; struct osi_macsec_sc_info *tmp_sc_p = &tmp_sc; struct osi_macsec_lut_status *lut_status_ptr; - nve32_t ret; + nve32_t ret = 0; if (macsec_config_validate_inputs(enable, ctlr, kt_idx) < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Input validation failed\n", 0ULL); - return -1; + ret = -1; + goto exit; } lut_status_ptr = 
&osi_core->macsec_lut_status[ctlr]; @@ -5219,20 +5380,23 @@ static nve32_t config_macsec(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "trying to delete non-existing SC ?\n", 0ULL); - return -1; + ret = -1; + goto exit; } else { - LOG("%s: Adding new SC/SA: ctlr: %hu", __func__, ctlr); - return add_new_sc(osi_core, sc, ctlr, kt_idx); + MACSEC_LOG("%s: Adding new SC/SA: ctlr: %hu", __func__, ctlr); + ret = add_new_sc(osi_core, sc, ctlr, kt_idx); + goto exit; } } else { - LOG("%s: Updating existing SC", __func__); + MACSEC_LOG("%s: Updating existing SC", __func__); if (enable == OSI_DISABLE) { - LOG("%s: Deleting existing SA", __func__); + MACSEC_LOG("%s: Deleting existing SA", __func__); if (del_upd_sc(osi_core, existing_sc, sc, ctlr, kt_idx) != OSI_NONE_SIGNED) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "failed to del SA\n", 0ULL); - return -1; + ret = -1; + goto exit; } else { if ((existing_sc->an_valid == OSI_NONE) && (lut_status_ptr->num_of_sc_used != OSI_NONE)) { @@ -5241,7 +5405,7 @@ static nve32_t config_macsec(struct osi_core_priv_data *const osi_core, sizeof(*existing_sc)); } - return 0; + goto exit; } } else { /* Take backup copy. 
@@ -5253,7 +5417,8 @@ static nve32_t config_macsec(struct osi_core_priv_data *const osi_core, if (ret < OSI_NONE_SIGNED) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "memcpy Failed\n", 0ULL); - return -1; + ret = -1; + goto exit; } tmp_sc_p->curr_an = sc->curr_an; tmp_sc_p->next_pn = sc->next_pn; @@ -5266,18 +5431,20 @@ static nve32_t config_macsec(struct osi_core_priv_data *const osi_core, OSI_NONE_SIGNED) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "failed to add new SA\n", 0ULL); - return -1; + ret = -1; + goto exit; } else { - LOG("%s: Updated new SC ctlr: %u " + MACSEC_LOG("%s: Updated new SC ctlr: %u " "Total active SCs: %u", __func__, ctlr, lut_status_ptr->num_of_sc_used); /* Now commit the changes */ *existing_sc = *tmp_sc_p; - return 0; } } } +exit: + return ret; } /** @@ -5308,24 +5475,27 @@ static nve32_t config_macsec(struct osi_core_priv_data *const osi_core, nve32_t osi_init_macsec_ops(struct osi_core_priv_data *const osi_core) { static struct osi_macsec_core_ops virt_macsec_ops; + nve32_t ret = 0; static struct osi_macsec_core_ops macsec_ops = { - .init = macsec_init, + .init = macsec_initialize, .deinit = macsec_deinit, - .handle_ns_irq = macsec_handle_ns_irq, - .handle_s_irq = macsec_handle_s_irq, + .handle_irq = macsec_handle_irq, .lut_config = macsec_lut_config, #ifdef MACSEC_KEY_PROGRAM .kt_config = macsec_kt_config, #endif /* MACSEC_KEY_PROGRAM */ .cipher_config = macsec_cipher_config, - .loopback_config = macsec_loopback_config, .macsec_en = macsec_enable, - .config = config_macsec, + .config = macsec_configure, .read_mmc = macsec_read_mmc, - .dbg_buf_config = macsec_dbg_buf_config, - .dbg_events_config = macsec_dbg_events_config, .get_sc_lut_key_index = macsec_get_key_index, .update_mtu = macsec_update_mtu, +#ifdef DEBUG_MACSEC + .loopback_config = macsec_loopback_config, + .dbg_buf_config = macsec_dbg_buf_config, + .dbg_events_config = macsec_dbg_events_config, + .intr_config = macsec_intr_config, +#endif }; if 
(osi_core->use_virtualization == OSI_ENABLE) { @@ -5333,11 +5503,13 @@ nve32_t osi_init_macsec_ops(struct osi_core_priv_data *const osi_core) ivc_init_macsec_ops(osi_core->macsec_ops); } else { if (osi_core->macsec_base == OSI_NULL) { - return -1; + ret = -1; + goto exit; } osi_core->macsec_ops = &macsec_ops; } - return 0; +exit: + return ret; } /** @@ -5368,12 +5540,14 @@ nve32_t osi_init_macsec_ops(struct osi_core_priv_data *const osi_core) nve32_t osi_macsec_init(struct osi_core_priv_data *const osi_core, nveu32_t mtu) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->init != OSI_NULL)) { - return osi_core->macsec_ops->init(osi_core, mtu); + ret = osi_core->macsec_ops->init(osi_core, mtu); } - return -1; + return ret; } /** @@ -5401,48 +5575,22 @@ nve32_t osi_macsec_init(struct osi_core_priv_data *const osi_core, */ nve32_t osi_macsec_deinit(struct osi_core_priv_data *const osi_core) { - if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && - (osi_core->macsec_ops->deinit != OSI_NULL)) { - return osi_core->macsec_ops->deinit(osi_core); - } - return -1; -} + nve32_t ret = -1; -/** - * @brief osi_macsec_ns_isr - macsec non-secure irq handler - * - * @note - * Algorithm: - * - Return -1 if osi core or ops is null - * - handles non-secure macsec interrupts - * - Refer to MACSEC column of <<******, (sequence diagram)>> for API details. - * - TraceID: *********** - * - * @param[in] osi_core: OSI core private data structure - * - * @pre MACSEC needs to be out of reset and proper clock configured. 
- * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -void osi_macsec_ns_isr(struct osi_core_priv_data *const osi_core) -{ if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && - (osi_core->macsec_ops->handle_ns_irq != OSI_NULL)) { - osi_core->macsec_ops->handle_ns_irq(osi_core); + (osi_core->macsec_ops->deinit != OSI_NULL)) { + ret = osi_core->macsec_ops->deinit(osi_core); } + return ret; } /** - * @brief osi_macsec_s_isr - macsec secure irq handler + * @brief osi_macsec_isr - macsec irq handler * * @note * Algorithm: * - Return -1 if osi core or ops is null - * - handles secure macsec interrupts + * - handles macsec interrupts * - Refer to MACSEC column of <<******, (sequence diagram)>> for API details. * - TraceID: *********** * @@ -5456,11 +5604,11 @@ void osi_macsec_ns_isr(struct osi_core_priv_data *const osi_core) * - Run time: Yes * - De-initialization: No */ -void osi_macsec_s_isr(struct osi_core_priv_data *const osi_core) +void osi_macsec_isr(struct osi_core_priv_data *const osi_core) { if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && - (osi_core->macsec_ops->handle_s_irq != OSI_NULL)) { - osi_core->macsec_ops->handle_s_irq(osi_core); + (osi_core->macsec_ops->handle_irq != OSI_NULL)) { + osi_core->macsec_ops->handle_irq(osi_core); } } @@ -5491,12 +5639,14 @@ void osi_macsec_s_isr(struct osi_core_priv_data *const osi_core) nve32_t osi_macsec_config_lut(struct osi_core_priv_data *const osi_core, struct osi_macsec_lut_config *const lut_config) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->lut_config != OSI_NULL)) { - return osi_core->macsec_ops->lut_config(osi_core, lut_config); + ret = osi_core->macsec_ops->lut_config(osi_core, lut_config); } - return -1; + return ret; } /** @@ -5529,13 +5679,15 @@ nve32_t osi_macsec_get_sc_lut_key_index(struct osi_core_priv_data *const osi_cor nveu8_t *sci, nveu32_t 
*key_index, nveu16_t ctlr) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->get_sc_lut_key_index != OSI_NULL)) { - return osi_core->macsec_ops->get_sc_lut_key_index(osi_core, sci, key_index, + ret = osi_core->macsec_ops->get_sc_lut_key_index(osi_core, sci, key_index, ctlr); } - return -1; + return ret; } /** @@ -5565,12 +5717,14 @@ nve32_t osi_macsec_get_sc_lut_key_index(struct osi_core_priv_data *const osi_cor nve32_t osi_macsec_update_mtu(struct osi_core_priv_data *const osi_core, nveu32_t mtu) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->update_mtu != OSI_NULL)) { - return osi_core->macsec_ops->update_mtu(osi_core, mtu); + ret = osi_core->macsec_ops->update_mtu(osi_core, mtu); } - return -1; + return ret; } #ifdef MACSEC_KEY_PROGRAM @@ -5601,13 +5755,15 @@ nve32_t osi_macsec_update_mtu(struct osi_core_priv_data *const osi_core, nve32_t osi_macsec_config_kt(struct osi_core_priv_data *const osi_core, struct osi_macsec_kt_config *const kt_config) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->kt_config != OSI_NULL) && (kt_config != OSI_NULL)) { - return osi_core->macsec_ops->kt_config(osi_core, kt_config); + ret = osi_core->macsec_ops->kt_config(osi_core, kt_config); } - return -1; + return ret; } #endif /* MACSEC_KEY_PROGRAM */ @@ -5638,14 +5794,17 @@ nve32_t osi_macsec_config_kt(struct osi_core_priv_data *const osi_core, nve32_t osi_macsec_cipher_config(struct osi_core_priv_data *const osi_core, nveu32_t cipher) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->cipher_config != OSI_NULL)) { - return osi_core->macsec_ops->cipher_config(osi_core, cipher); + ret = osi_core->macsec_ops->cipher_config(osi_core, cipher); } - return -1; + return ret; } +#ifdef DEBUG_MACSEC /** * @brief osi_macsec_loopback 
- API to enable/disable macsec loopback * @@ -5673,14 +5832,16 @@ nve32_t osi_macsec_cipher_config(struct osi_core_priv_data *const osi_core, nve32_t osi_macsec_loopback(struct osi_core_priv_data *const osi_core, nveu32_t enable) { + nve32_t ret = -1; if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->loopback_config != OSI_NULL)) { - return osi_core->macsec_ops->loopback_config(osi_core, enable); + ret = osi_core->macsec_ops->loopback_config(osi_core, enable); } - return -1; + return ret; } +#endif /* DEBUG_MACSEC */ /** * @brief osi_macsec_en - API to enable/disable macsec @@ -5710,18 +5871,20 @@ nve32_t osi_macsec_loopback(struct osi_core_priv_data *const osi_core, nve32_t osi_macsec_en(struct osi_core_priv_data *const osi_core, nveu32_t enable) { + nve32_t ret = -1; + if (((enable & OSI_MACSEC_TX_EN) != OSI_MACSEC_TX_EN) && ((enable & OSI_MACSEC_RX_EN) != OSI_MACSEC_RX_EN) && (enable != OSI_DISABLE)) { - return -1; + goto exit; } if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->macsec_en != OSI_NULL)) { - return osi_core->macsec_ops->macsec_en(osi_core, enable); + ret = osi_core->macsec_ops->macsec_en(osi_core, enable); } - - return -1; +exit: + return ret; } /** @@ -5737,6 +5900,7 @@ nve32_t osi_macsec_en(struct osi_core_priv_data *const osi_core, * * @param[in] osi_core: OSI core private data structure * @param[in] sc: Pointer to the sc that needs to be added/deleted/updated + * @param[in] enable: enable or disable * @param[in] ctlr: Controller selected * @param[out] kt_idx: Pointer to the kt_index passed to OSD * @@ -5756,18 +5920,20 @@ nve32_t osi_macsec_config(struct osi_core_priv_data *const osi_core, nveu32_t enable, nveu16_t ctlr, nveu16_t *kt_idx) { + nve32_t ret = -1; + if (((enable != OSI_ENABLE) && (enable != OSI_DISABLE)) || (ctlr > OSI_CTLR_SEL_MAX) || (kt_idx == OSI_NULL)) { - return -1; + goto exit; } if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) 
&& (osi_core->macsec_ops->config != OSI_NULL) && (sc != OSI_NULL)) { - return osi_core->macsec_ops->config(osi_core, sc, + ret = osi_core->macsec_ops->config(osi_core, sc, enable, ctlr, kt_idx); } - - return -1; +exit: + return ret; } /** @@ -5795,15 +5961,17 @@ nve32_t osi_macsec_config(struct osi_core_priv_data *const osi_core, */ nve32_t osi_macsec_read_mmc(struct osi_core_priv_data *const osi_core) { + nve32_t ret = -1; + if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->read_mmc != OSI_NULL)) { osi_core->macsec_ops->read_mmc(osi_core); - return 0; + ret = 0; } - - return -1; + return ret; } +#ifdef DEBUG_MACSEC /** * @brief osi_macsec_config_dbg_buf - Reads the debug buffer captured * @@ -5832,14 +6000,15 @@ nve32_t osi_macsec_config_dbg_buf( struct osi_core_priv_data *const osi_core, struct osi_macsec_dbg_buf_config *const dbg_buf_config) { + nve32_t ret = -1; if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->dbg_buf_config != OSI_NULL)) { - return osi_core->macsec_ops->dbg_buf_config(osi_core, + ret = osi_core->macsec_ops->dbg_buf_config(osi_core, dbg_buf_config); } - return -1; + return ret; } /** @@ -5870,14 +6039,16 @@ nve32_t osi_macsec_dbg_events_config( struct osi_core_priv_data *const osi_core, struct osi_macsec_dbg_buf_config *const dbg_buf_config) { + nve32_t ret = -1; if ((osi_core != OSI_NULL) && (osi_core->macsec_ops != OSI_NULL) && (osi_core->macsec_ops->dbg_events_config != OSI_NULL)) { - return osi_core->macsec_ops->dbg_events_config(osi_core, + ret = osi_core->macsec_ops->dbg_events_config(osi_core, dbg_buf_config); } - return -1; + return ret; } +#endif /* DEBUG_MACSEC */ #endif /* MACSEC_SUPPORT */ diff --git a/kernel/nvethernetrm/osi/core/macsec.h b/kernel/nvethernetrm/osi/core/macsec.h index 7d027d0ca8..aabe9a233b 100644 --- a/kernel/nvethernetrm/osi/core/macsec.h +++ b/kernel/nvethernetrm/osi/core/macsec.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, 
NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -48,8 +48,10 @@ * @brief MACsec controller register offsets * @{ */ +#ifdef MACSEC_KEY_PROGRAM #define MACSEC_GCM_KEYTABLE_CONFIG 0x0000 #define MACSEC_GCM_KEYTABLE_DATA(x) ((0x0004U) + ((x) * 4U)) +#endif /* MACSEC_KEY_PROGRAM */ #define MACSEC_RX_ICV_ERR_CNTRL 0x4000 #define MACSEC_INTERRUPT_COMMON_SR 0x4004 #define MACSEC_TX_IMR 0x4008 @@ -89,7 +91,6 @@ #define MACSEC_TX_SCI_LUT_VALID 0xD028 #define MACSEC_RX_BYP_LUT_VALID 0xD02C #define MACSEC_RX_SCI_LUT_VALID 0xD030 - #define MACSEC_COMMON_IMR 0xD054 #define MACSEC_COMMON_ISR 0xD058 #define MACSEC_TX_SC_KEY_INVALID_STS0_0 0xD064 @@ -97,14 +98,16 @@ #define MACSEC_RX_SC_KEY_INVALID_STS0_0 0xD080 #define MACSEC_RX_SC_KEY_INVALID_STS1_0 0xD084 -#define MACSEC_TX_DEBUG_CONTROL_0 0xD098 -#define MACSEC_TX_DEBUG_TRIGGER_EN_0 0xD09C #define MACSEC_TX_DEBUG_STATUS_0 0xD0C4 +#define MACSEC_TX_DEBUG_TRIGGER_EN_0 0xD09C +#define MACSEC_RX_DEBUG_STATUS_0 0xD0F8 +#define MACSEC_RX_DEBUG_TRIGGER_EN_0 0xD0E0 +#ifdef DEBUG_MACSEC +#define MACSEC_TX_DEBUG_CONTROL_0 0xD098 #define MACSEC_DEBUG_BUF_CONFIG_0 0xD0C8 #define MACSEC_DEBUG_BUF_DATA_0(x) ((0xD0CCU) + ((x) * 4U)) #define MACSEC_RX_DEBUG_CONTROL_0 0xD0DC -#define MACSEC_RX_DEBUG_TRIGGER_EN_0 0xD0E0 -#define MACSEC_RX_DEBUG_STATUS_0 0xD0F8 +#endif /* DEBUG_MACSEC */ #define MACSEC_CONTROL1 0xE000 #define MACSEC_GCM_AES_CONTROL_0 0xE004 @@ -114,6 +117,7 @@ #define MACSEC_RX_SOT_DELAY 0xE01C /** @} */ +#ifdef MACSEC_KEY_PROGRAM /** * @addtogroup MACSEC_GCM_KEYTABLE_CONFIG register * @@ -138,6 +142,7 @@ #define MACSEC_KT_DATA_REG_SAK_CNT 8U #define MACSEC_KT_DATA_REG_H_CNT 4U /** @} */ +#endif /* MACSEC_KEY_PROGRAM */ /** * @addtogroup MACSEC_LUT_CONFIG register @@ -188,7 +193,9 @@ * @brief Bit definitions of 
MACSEC_CONTROL1 register * @{ */ +#ifdef DEBUG_MACSEC #define MACSEC_LOOPBACK_MODE_EN OSI_BIT(31) +#endif /* DEBUG_MACSEC */ #define MACSEC_RX_MTU_CHECK_EN OSI_BIT(16) #define MACSEC_TX_LUT_PRIO_BYP OSI_BIT(2) #define MACSEC_TX_MTU_CHECK_EN OSI_BIT(0) @@ -215,10 +222,12 @@ * @{ */ #define MACSEC_SECURE_REG_VIOL_INT_EN OSI_BIT(31) +#ifdef DEBUG_MACSEC #define MACSEC_RX_UNINIT_KEY_SLOT_INT_EN OSI_BIT(17) #define MACSEC_RX_LKUP_MISS_INT_EN OSI_BIT(16) #define MACSEC_TX_UNINIT_KEY_SLOT_INT_EN OSI_BIT(1) #define MACSEC_TX_LKUP_MISS_INT_EN OSI_BIT(0) +#endif /* DEBUG_MACSEC */ /** @} */ /** @@ -227,11 +236,12 @@ * @brief Bit definitions of TX_INTERRUPT_MASK register * @{ */ +#define MACSEC_TX_MAC_CRC_ERROR_INT_EN OSI_BIT(16) +#ifdef DEBUG_MACSEC #define MACSEC_TX_DBG_BUF_CAPTURE_DONE_INT_EN OSI_BIT(22) #define MACSEC_TX_MTU_CHECK_FAIL_INT_EN OSI_BIT(19) #define MACSEC_TX_AES_GCM_BUF_OVF_INT_EN OSI_BIT(18) #define MACSEC_TX_SC_AN_NOT_VALID_INT_EN OSI_BIT(17) -#define MACSEC_TX_MAC_CRC_ERROR_INT_EN OSI_BIT(16) #define MACSEC_TX_PN_EXHAUSTED_INT_EN OSI_BIT(1) #define MACSEC_TX_PN_THRSHLD_RCHD_INT_EN OSI_BIT(0) /** @} */ @@ -243,12 +253,13 @@ * @{ */ #define MACSEC_RX_DBG_BUF_CAPTURE_DONE_INT_EN OSI_BIT(22) -#define MACSEC_RX_ICV_ERROR_INT_EN OSI_BIT(21) #define RX_REPLAY_ERROR_INT_EN OSI_BIT(20) #define MACSEC_RX_MTU_CHECK_FAIL_INT_EN OSI_BIT(19) #define MACSEC_RX_AES_GCM_BUF_OVF_INT_EN OSI_BIT(18) -#define MACSEC_RX_MAC_CRC_ERROR_INT_EN OSI_BIT(16) #define MACSEC_RX_PN_EXHAUSTED_INT_EN OSI_BIT(1) +#endif /* DEBUG_MACSEC */ +#define MACSEC_RX_ICV_ERROR_INT_EN OSI_BIT(21) +#define MACSEC_RX_MAC_CRC_ERROR_INT_EN OSI_BIT(16) /** @} */ /** @@ -264,6 +275,16 @@ #define MACSEC_TX_LKUP_MISS OSI_BIT(0) /** @} */ +/** + * @addtogroup MACSEC_STATS_CONTROL_0 register + * + * @brief Bit definitions of MACSEC_STATS_CONTROL_0 register + * @{ + */ +#define MACSEC_STATS_CONTROL0_CNT_RL_OVR_CPY OSI_BIT(1) +/** @} */ + + /** * @addtogroup MACSEC_TX_ISR register * @@ -294,15 +315,7 @@ #define 
MACSEC_RX_PN_EXHAUSTED OSI_BIT(1) /** @} */ -/** - * @addtogroup MACSEC_STATS_CONTROL_0 register - * - * @brief Bit definitions of MACSEC_STATS_CONTROL_0 register - * @{ - */ -#define MACSEC_STATS_CONTROL0_CNT_RL_OVR_CPY OSI_BIT(1) -/** @} */ - +#ifdef DEBUG_MACSEC /** * @addtogroup MACSEC_DEBUG_BUF_CONFIG_0 register * @@ -361,21 +374,14 @@ */ #define MACSEC_RX_DEBUG_CONTROL_0_START_CAP OSI_BIT(31) /** @} */ +#endif /* DEBUG_MACSEC */ #define MTU_LENGTH_MASK 0xFFFFU #define SOT_LENGTH_MASK 0xFFU #define EQOS_MACSEC_SOT_DELAY 0x4EU /** - * @addtogroup TX/RX_BYP/SCI_LUT_VALID register - * - * @brief Bit definitions of LUT_VALID registers - * @{ - */ -/** @} */ - -/** - * @addtogroup TX/RX LUT bit fields in LUT_DATA registers + * @addtogroup MACSEC-LUT TX/RX LUT bit fields in LUT_DATA registers * * @brief Helper macros for LUT data programming * @{ @@ -439,8 +445,21 @@ #define MACSEC_RX_SCI_LUT_PREEMPT_INACTIVE OSI_BIT(9) /** @} */ +#ifdef DEBUG_MACSEC /* debug buffer data read/write length */ #define DBG_BUF_LEN 4U +#endif /* DEBUG_MACSEC */ +#ifdef MACSEC_KEY_PROGRAM #define INTEGER_LEN 4U +#endif /* MACSEC_KEY_PROGRAM */ + +#ifdef HSI_SUPPORT +/* Set RX ISR set interrupt status bit */ +#define MACSEC_RX_ISR_SET 0x4050U +/* Set TX ISR set interrupt status bit */ +#define MACSEC_TX_ISR_SET 0x4010U +/* Set Common ISR set interrupt status bit */ +#define MACSEC_COMMON_ISR_SET 0xd05cU +#endif #endif /* INCLUDED_MACSEC_H */ diff --git a/kernel/nvethernetrm/osi/core/mgbe_core.c b/kernel/nvethernetrm/osi/core/mgbe_core.c index 18d03e23a9..ebb0660dc5 100644 --- a/kernel/nvethernetrm/osi/core/mgbe_core.c +++ b/kernel/nvethernetrm/osi/core/mgbe_core.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -21,7 +21,6 @@ */ #include "../osi/common/common.h" -#include "../osi/common/type.h" #include #include #include @@ -29,254 +28,8 @@ #include "core_local.h" #include "xpcs.h" #include "mgbe_mmc.h" -#include "vlan_filter.h" #include "core_common.h" - -/** - * @brief mgbe_ptp_tsc_capture - read PTP and TSC registers - * - * Algorithm: - * - write 1 to MGBE_WRAP_SYNC_TSC_PTP_CAPTURE_0 - * - wait till MGBE_WRAP_SYNC_TSC_PTP_CAPTURE_0 is 0x0 - * - read and return following registers - * MGBE_WRAP _TSC_CAPTURE_LOW_0 - * MGBE_WRAP _TSC_CAPTURE_HIGH_0 - * MGBE_WRAP _PTP_CAPTURE_LOW_0 - * MGBE_WRAP _PTP_CAPTURE_HIGH_0 - * - * @param[in] base: MGBE virtual base address. - * @param[out]: osi_core_ptp_tsc_data register - * - * @note MAC needs to be out of reset and proper clock configured. TSC and PTP - * registers should be configured. - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t mgbe_ptp_tsc_capture(struct osi_core_priv_data *const osi_core, - struct osi_core_ptp_tsc_data *data) -{ - nveu32_t retry = 20U; - nveu32_t count = 0U, val = 0U; - nve32_t cond = COND_NOT_MET; - nve32_t ret = -1; - - osi_writela(osi_core, OSI_ENABLE, (nveu8_t *)osi_core->base + - MGBE_WRAP_SYNC_TSC_PTP_CAPTURE); - - /* Poll Until Poll Condition */ - while (cond == COND_NOT_MET) { - if (count > retry) { - /* Max retries reached */ - goto done; - } - - count++; - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_SYNC_TSC_PTP_CAPTURE); - if ((val & OSI_ENABLE) == OSI_NONE) { - cond = COND_MET; - } else { - /* delay if SWR is set */ - osi_core->osd_ops.udelay(1U); - } - } - - data->tsc_low_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_TSC_CAPTURE_LOW); - data->tsc_high_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_TSC_CAPTURE_HIGH); - data->ptp_low_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_PTP_CAPTURE_LOW); - data->ptp_high_bits = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_WRAP_PTP_CAPTURE_HIGH); - ret = 0; -done: - return ret; -} - -/** - * @brief mgbe_config_fw_err_pkts - Configure forwarding of error packets - * - * Algorithm: When FEP bit is reset, the Rx queue drops packets with - * error status (CRC error, GMII_ER, watchdog timeout, or overflow). - * When FEP bit is set, all packets except the runt error packets - * are forwarded to the application or DMA. - * - * @param[in] addr: Base address indicating the start of memory mapped IO - * region of the MAC. - * @param[in] qinx: Q index - * @param[in] enable_fw_err_pkts: Enable or Disable the forwarding of error - * packets - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_config_fw_err_pkts(struct osi_core_priv_data *osi_core, - const unsigned int qinx, - const unsigned int enable_fw_err_pkts) -{ - unsigned int val; - - /* Check for valid enable_fw_err_pkts and qinx values */ - if ((enable_fw_err_pkts!= OSI_ENABLE && - enable_fw_err_pkts != OSI_DISABLE) || - (qinx >= OSI_MGBE_MAX_NUM_CHANS)) { - return -1; - } - - /* Read MTL RXQ Operation_Mode Register */ - val = osi_readla(osi_core, (unsigned char *)osi_core->base + - MGBE_MTL_CHX_RX_OP_MODE(qinx)); - - /* enable_fw_err_pkts, 1 is for enable and 0 is for disable */ - if (enable_fw_err_pkts == OSI_ENABLE) { - /* When enable_fw_err_pkts bit is set, all packets except - * the runt error packets are forwarded to the application - * or DMA. - */ - val |= MGBE_MTL_RXQ_OP_MODE_FEP; - } else { - /* When this bit is reset, the Rx queue drops packets with error - * status (CRC error, GMII_ER, watchdog timeout, or overflow) - */ - val &= ~MGBE_MTL_RXQ_OP_MODE_FEP; - } - - /* Write to FEP bit of MTL RXQ Operation Mode Register to enable or - * disable the forwarding of error packets to DMA or application. - */ - osi_writela(osi_core, val, (unsigned char *)osi_core->base + - MGBE_MTL_CHX_RX_OP_MODE(qinx)); - - return 0; -} - -/** - * @brief mgbe_poll_for_swr - Poll for software reset (SWR bit in DMA Mode) - * - * Algorithm: CAR reset will be issued through MAC reset pin. - * Waits for SWR reset to be cleared in DMA Mode register. - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC needs to be out of reset and proper clock configured. - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t mgbe_poll_for_swr(struct osi_core_priv_data *const osi_core) -{ - void *addr = osi_core->base; - nveu32_t retry = 1000; - nveu32_t count; - nveu32_t dma_bmr = 0; - nve32_t cond = 1; - nveu32_t pre_si = osi_core->pre_si; - - /* Performing software reset */ - if (pre_si == OSI_ENABLE) { - osi_writela(osi_core, OSI_ENABLE, - (nveu8_t *)addr + MGBE_DMA_MODE); - } - - /* Poll Until Poll Condition */ - count = 0; - while (cond == 1) { - if (count > retry) { - return -1; - } - - count++; - - dma_bmr = osi_readla(osi_core, (nveu8_t *)addr + MGBE_DMA_MODE); - if ((dma_bmr & MGBE_DMA_MODE_SWR) == OSI_NONE) { - cond = 0; - } else { - /* sleep if SWR is set */ - osi_core->osd_ops.msleep(1U); - } - } - - return 0; -} - -/** - * @brief mgbe_calculate_per_queue_fifo - Calculate per queue FIFO size - * - * Algorithm: Total Tx/Rx FIFO size which is read from - * MAC HW is being shared equally among the queues that are - * configured. - * - * @param[in] fifo_size: Total Tx/RX HW FIFO size. - * @param[in] queue_count: Total number of Queues configured. - * - * @note MAC has to be out of reset. - * - * @retval Queue size that need to be programmed. 
- */ -static nveu32_t mgbe_calculate_per_queue_fifo(nveu32_t fifo_size, - nveu32_t queue_count) -{ - nveu32_t q_fifo_size = 0; /* calculated fifo size per queue */ - nveu32_t p_fifo = 0; /* per queue fifo size program value */ - - if (queue_count == 0U) { - return 0U; - } - - /* calculate Tx/Rx fifo share per queue */ - switch (fifo_size) { - case 0: - case 1: - case 2: - case 3: - q_fifo_size = FIFO_SIZE_KB(1U); - break; - case 4: - q_fifo_size = FIFO_SIZE_KB(2U); - break; - case 5: - q_fifo_size = FIFO_SIZE_KB(4U); - break; - case 6: - q_fifo_size = FIFO_SIZE_KB(8U); - break; - case 7: - q_fifo_size = FIFO_SIZE_KB(16U); - break; - case 8: - q_fifo_size = FIFO_SIZE_KB(32U); - break; - case 9: - q_fifo_size = FIFO_SIZE_KB(64U); - break; - case 10: - q_fifo_size = FIFO_SIZE_KB(128U); - break; - case 11: - q_fifo_size = FIFO_SIZE_KB(256U); - break; - case 12: - /* Size mapping not found for 192KB, so assigned 12 */ - q_fifo_size = FIFO_SIZE_KB(192U); - break; - default: - q_fifo_size = FIFO_SIZE_KB(1U); - break; - } - - q_fifo_size = q_fifo_size / queue_count; - - if (q_fifo_size < UINT_MAX) { - p_fifo = (q_fifo_size / 256U) - 1U; - } - - return p_fifo; -} +#include "macsec.h" /** * @brief mgbe_poll_for_mac_accrtl - Poll for Indirect Access control and status @@ -285,17 +38,18 @@ static nveu32_t mgbe_calculate_per_queue_fifo(nveu32_t fifo_size, * Algorithm: Waits for waits for transfer busy bit to be cleared in * MAC Indirect address control register to complete operations. * - * @param[in] addr: MGBE virtual base address. + * @param[in] osi_core: osi core priv data structure * * @note MAC needs to be out of reset and proper clock configured. * * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core) +static nve32_t mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core) { nveu32_t count = 0U; nveu32_t mac_indir_addr_ctrl = 0U; + nve32_t ret = -1; /* Poll Until MAC_Indir_Access_Ctrl OB is clear */ while (count < MGBE_MAC_INDIR_AC_OB_RETRY) { @@ -304,7 +58,8 @@ static int mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core) MGBE_MAC_INDIR_AC); if ((mac_indir_addr_ctrl & MGBE_MAC_INDIR_AC_OB) == OSI_NONE) { /* OB is clear exit the loop */ - return 0; + ret = 0; + break; } /* wait for 10 usec for OB clear and retry */ @@ -312,7 +67,7 @@ static int mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core) count++; } - return -1; + return ret; } /** @@ -320,7 +75,7 @@ static int mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core) * * Algorithm: writes MAC Indirect AC register * - * @param[in] base: MGBE virtual base address. + * @param[in] osi_core: osi core priv data structure * @param[in] mc_no: MAC AC Mode Select number * @param[in] addr_offset: MAC AC Address Offset. * @param[in] value: MAC AC register value @@ -330,13 +85,14 @@ static int mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core) * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_mac_indir_addr_write(struct osi_core_priv_data *osi_core, - nveu32_t mc_no, - nveu32_t addr_offset, - nveu32_t value) +static nve32_t mgbe_mac_indir_addr_write(struct osi_core_priv_data *osi_core, + nveu32_t mc_no, + nveu32_t addr_offset, + nveu32_t value) { void *base = osi_core->base; nveu32_t addr = 0; + nve32_t ret = 0; /* Write MAC_Indir_Access_Data register value */ osi_writela(osi_core, value, (nveu8_t *)base + MGBE_MAC_INDIR_DATA); @@ -365,12 +121,12 @@ static int mgbe_mac_indir_addr_write(struct osi_core_priv_data *osi_core, /* Wait until OB bit reset */ if (mgbe_poll_for_mac_acrtl(osi_core) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to write MAC_Indir_Access_Ctrl\n", mc_no); - return -1; + ret = -1; } - return 0; + return ret; } /** @@ -378,7 +134,7 @@ static int mgbe_mac_indir_addr_write(struct osi_core_priv_data *osi_core, * * Algorithm: Reads MAC Indirect AC register * - * @param[in] base: MGBE virtual base address. + * @param[in] osi_core: osi core priv data structure * @param[in] mc_no: MAC AC Mode Select number * @param[in] addr_offset: MAC AC Address Offset. * @param[in] value: Pointer MAC AC register value @@ -388,13 +144,14 @@ static int mgbe_mac_indir_addr_write(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_mac_indir_addr_read(struct osi_core_priv_data *osi_core, - nveu32_t mc_no, - nveu32_t addr_offset, - nveu32_t *value) +static nve32_t mgbe_mac_indir_addr_read(struct osi_core_priv_data *osi_core, + nveu32_t mc_no, + nveu32_t addr_offset, + nveu32_t *value) { void *base = osi_core->base; nveu32_t addr = 0; + nve32_t ret = 0; /* Program MAC_Indir_Access_Ctrl */ addr = osi_readla(osi_core, (nveu8_t *)base + MGBE_MAC_INDIR_AC); @@ -420,116 +177,15 @@ static int mgbe_mac_indir_addr_read(struct osi_core_priv_data *osi_core, /* Wait until OB bit reset */ if (mgbe_poll_for_mac_acrtl(osi_core) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to write MAC_Indir_Access_Ctrl\n", mc_no); - return -1; + ret = -1; + goto fail; } /* Read MAC_Indir_Access_Data register value */ *value = osi_readla(osi_core, (nveu8_t *)base + MGBE_MAC_INDIR_DATA); - return 0; -} - -/** - * @brief mgbe_config_l2_da_perfect_inverse_match - configure register for - * inverse or perfect match. - * - * Algorithm: This sequence is used to select perfect/inverse matching - * for L2 DA - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] perfect_inverse_match: 1 - inverse mode 0- perfect mode - * - * @note MAC should be init and started. see osi_start_mac() - */ -static inline void mgbe_config_l2_da_perfect_inverse_match( - struct osi_core_priv_data *osi_core, - unsigned int perfect_inverse_match) -{ - unsigned int value = 0U; - - value = osi_readla(osi_core, - (unsigned char *)osi_core->base + MGBE_MAC_PFR); - value &= ~MGBE_MAC_PFR_DAIF; - if (perfect_inverse_match == OSI_INV_MATCH) { - /* Set DA Inverse Filtering */ - value |= MGBE_MAC_PFR_DAIF; - } - osi_writela(osi_core, value, - (unsigned char *)osi_core->base + MGBE_MAC_PFR); -} - -/** - * @brief mgbe_config_mac_pkt_filter_reg - configure mac filter register. 
- * - * Algorithm: This sequence is used to configure MAC in differnet pkt - * processing modes like promiscuous, multicast, unicast, - * hash unicast/multicast. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] filter: OSI filter structure. - * - * @note 1) MAC should be initialized and started. see osi_start_mac() - * - * @retval 0 always - */ -static int mgbe_config_mac_pkt_filter_reg(struct osi_core_priv_data *osi_core, - const struct osi_filter *filter) -{ - unsigned int value = 0U; - int ret = 0; - - value = osi_readla(osi_core, - (unsigned char *)osi_core->base + MGBE_MAC_PFR); - - /* Retain all other values */ - value &= (MGBE_MAC_PFR_DAIF | MGBE_MAC_PFR_DBF | MGBE_MAC_PFR_SAIF | - MGBE_MAC_PFR_SAF | MGBE_MAC_PFR_PCF | MGBE_MAC_PFR_VTFE | - MGBE_MAC_PFR_IPFE | MGBE_MAC_PFR_DNTU | MGBE_MAC_PFR_RA); - - if ((filter->oper_mode & OSI_OPER_EN_PROMISC) != OSI_DISABLE) { - /* Set Promiscuous Mode Bit */ - value |= MGBE_MAC_PFR_PR; - } - - if ((filter->oper_mode & OSI_OPER_DIS_PROMISC) != OSI_DISABLE) { - /* Reset Promiscuous Mode Bit */ - value &= ~MGBE_MAC_PFR_PR; - } - - if ((filter->oper_mode & OSI_OPER_EN_ALLMULTI) != OSI_DISABLE) { - /* Set Pass All Multicast Bit */ - value |= MGBE_MAC_PFR_PM; - } - - if ((filter->oper_mode & OSI_OPER_DIS_ALLMULTI) != OSI_DISABLE) { - /* Reset Pass All Multicast Bit */ - value &= ~MGBE_MAC_PFR_PM; - } - - if ((filter->oper_mode & OSI_OPER_EN_PERFECT) != OSI_DISABLE) { - /* Set Hash or Perfect Filter Bit */ - value |= MGBE_MAC_PFR_HPF; - } - - if ((filter->oper_mode & OSI_OPER_DIS_PERFECT) != OSI_DISABLE) { - /* Reset Hash or Perfect Filter Bit */ - value &= ~MGBE_MAC_PFR_HPF; - } - - osi_writela(osi_core, value, - (unsigned char *)osi_core->base + MGBE_MAC_PFR); - - if ((filter->oper_mode & OSI_OPER_EN_L2_DA_INV) != OSI_DISABLE) { - mgbe_config_l2_da_perfect_inverse_match(osi_core, - OSI_INV_MATCH); - } - - if ((filter->oper_mode & OSI_OPER_DIS_L2_DA_INV) != OSI_DISABLE) { - 
mgbe_config_l2_da_perfect_inverse_match(osi_core, - OSI_PFT_MATCH); - } - +fail: return ret; } @@ -548,8 +204,8 @@ static int mgbe_config_mac_pkt_filter_reg(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_filter_args_validate(struct osi_core_priv_data *const osi_core, - const struct osi_filter *filter) +static nve32_t mgbe_filter_args_validate(struct osi_core_priv_data *const osi_core, + const struct osi_filter *filter) { nveu32_t idx = filter->index; nveu32_t dma_routing_enable = filter->dma_routing; @@ -557,22 +213,26 @@ static int mgbe_filter_args_validate(struct osi_core_priv_data *const osi_core, nveu32_t addr_mask = filter->addr_mask; nveu32_t src_dest = filter->src_dest; nveu32_t dma_chansel = filter->dma_chansel; + nve32_t ret = 0; + (void) osi_core; /* check for valid index (0 to 31) */ if (idx >= OSI_MGBE_MAX_MAC_ADDRESS_FILTER) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid MAC filter index\n", idx); - return -1; + ret = -1; + goto fail; } /* check for DMA channel index (0 to 9) */ - if ((dma_chan > OSI_MGBE_MAX_NUM_CHANS - 0x1U) && - (dma_chan != OSI_CHAN_ANY)){ + if ((dma_chan > (OSI_MGBE_MAX_NUM_CHANS - 0x1U)) && + (dma_chan != OSI_CHAN_ANY)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, "invalid dma channel\n", (nveul64_t)dma_chan); - return -1; + ret = -1; + goto fail; } /* validate dma_chansel argument */ @@ -580,35 +240,38 @@ static int mgbe_filter_args_validate(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, "invalid dma_chansel value\n", dma_chansel); - return -1; + ret = -1; + goto fail; } /* validate addr_mask argument */ if (addr_mask > MGBE_MAB_ADDRH_MBC_MAX_MASK) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid addr_mask value\n", addr_mask); - return -1; + ret = -1; + goto fail; } /* validate src_dest argument */ - if (src_dest != OSI_SA_MATCH && src_dest != 
OSI_DA_MATCH) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + if ((src_dest != OSI_SA_MATCH) && (src_dest != OSI_DA_MATCH)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid src_dest value\n", src_dest); - return -1; + ret = -1; + goto fail; } /* validate dma_routing_enable argument */ - if (dma_routing_enable != OSI_ENABLE && - dma_routing_enable != OSI_DISABLE) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + if ((dma_routing_enable != OSI_ENABLE) && + (dma_routing_enable != OSI_DISABLE)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid dma_routing value\n", dma_routing_enable); - return -1; + ret = -1; } - - return 0; +fail: + return ret; } /** @@ -629,7 +292,7 @@ static int mgbe_filter_args_validate(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_update_mac_addr_low_high_reg( +static nve32_t mgbe_update_mac_addr_low_high_reg( struct osi_core_priv_data *const osi_core, const struct osi_filter *filter) { @@ -646,7 +309,8 @@ static int mgbe_update_mac_addr_low_high_reg( /* Validate filter values */ if (mgbe_filter_args_validate(osi_core, filter) < 0) { /* Filter argments validation got failed */ - return -1; + ret = -1; + goto fail; } value = osi_readla(osi_core, (nveu8_t *)osi_core->base + @@ -660,7 +324,7 @@ static int mgbe_update_mac_addr_low_high_reg( if (ret < 0) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "indirect register read failed\n", 0ULL); - return -1; + goto fail; } /* preserve last XDCS bits */ @@ -682,46 +346,38 @@ static int mgbe_update_mac_addr_low_high_reg( osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MAC_ADDRH((idx))); osi_writela(osi_core, OSI_MAX_32BITS, - (unsigned char *)osi_core->base + MGBE_MAC_ADDRL((idx))); - - return 0; - } - - /* Add DMA channel to value in binary */ - value = OSI_NONE; - value |= ((dma_chan << MGBE_MAC_ADDRH_DCS_SHIFT) & - MGBE_MAC_ADDRH_DCS); - - if (idx != 0U) { - /* Add Address mask */ - value |= 
((addr_mask << MGBE_MAC_ADDRH_MBC_SHIFT) & - MGBE_MAC_ADDRH_MBC); + (nveu8_t *)osi_core->base + MGBE_MAC_ADDRL((idx))); + } else { + /* Add DMA channel to value in binary */ + value = OSI_NONE; + value |= ((dma_chan << MGBE_MAC_ADDRH_DCS_SHIFT) & MGBE_MAC_ADDRH_DCS); - /* Setting Source/Destination Address match valid */ - value |= ((src_dest << MGBE_MAC_ADDRH_SA_SHIFT) & - MGBE_MAC_ADDRH_SA); - } + if (idx != 0U) { + /* Add Address mask */ + value |= ((addr_mask << MGBE_MAC_ADDRH_MBC_SHIFT) & + MGBE_MAC_ADDRH_MBC); - osi_writela(osi_core, ((unsigned int)addr[4] | - ((unsigned int)addr[5] << 8) | - MGBE_MAC_ADDRH_AE | - value), - (unsigned char *)osi_core->base + MGBE_MAC_ADDRH((idx))); + /* Setting Source/Destination Address match valid */ + value |= ((src_dest << MGBE_MAC_ADDRH_SA_SHIFT) & + MGBE_MAC_ADDRH_SA); + } - osi_writela(osi_core, ((unsigned int)addr[0] | - ((unsigned int)addr[1] << 8) | - ((unsigned int)addr[2] << 16) | - ((unsigned int)addr[3] << 24)), - (unsigned char *)osi_core->base + MGBE_MAC_ADDRL((idx))); + osi_writela(osi_core, + ((nveu32_t)addr[4] | ((nveu32_t)addr[5] << 8) | + MGBE_MAC_ADDRH_AE | value), + (nveu8_t *)osi_core->base + MGBE_MAC_ADDRH((idx))); - /* Write XDCS configuration into MAC_DChSel_IndReg(x) */ - /* Append DCS DMA channel to XDCS hot bit selection */ - xdcs_check |= (OSI_BIT(dma_chan) | dma_chansel); - ret = mgbe_mac_indir_addr_write(osi_core, - MGBE_MAC_DCHSEL, - idx, - xdcs_check); + osi_writela(osi_core, + ((nveu32_t)addr[0] | ((nveu32_t)addr[1] << 8) | + ((nveu32_t)addr[2] << 16) | ((nveu32_t)addr[3] << 24)), + (nveu8_t *)osi_core->base + MGBE_MAC_ADDRL((idx))); + /* Write XDCS configuration into MAC_DChSel_IndReg(x) */ + /* Append DCS DMA channel to XDCS hot bit selection */ + xdcs_check |= (OSI_BIT(dma_chan) | dma_chansel); + ret = mgbe_mac_indir_addr_write(osi_core, MGBE_MAC_DCHSEL, idx, xdcs_check); + } +fail: return ret; } @@ -731,32 +387,34 @@ static int mgbe_update_mac_addr_low_high_reg( * Algorithm: Waits for 
waits for transfer busy bit to be cleared in * L3_L4 address control register to complete filter register operations. * - * @param[in] addr: MGBE virtual base address. + * @param[in] osi_core: osi core priv data structure * * @note MAC needs to be out of reset and proper clock configured. * * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_poll_for_l3l4crtl(struct osi_core_priv_data *osi_core) +static nve32_t mgbe_poll_for_l3l4crtl(struct osi_core_priv_data *osi_core) { - unsigned int retry = 10; - unsigned int count; - unsigned int l3l4_addr_ctrl = 0; - int cond = 1; + nveu32_t retry = 10; + nveu32_t count; + nveu32_t l3l4_addr_ctrl = 0; + nve32_t cond = 1; + nve32_t ret = 0; /* Poll Until L3_L4_Address_Control XB is clear */ count = 0; while (cond == 1) { if (count > retry) { /* Return error after max retries */ - return -1; + ret = -1; + goto fail; } count++; l3l4_addr_ctrl = osi_readla(osi_core, - (unsigned char *)osi_core->base + + (nveu8_t *)osi_core->base + MGBE_MAC_L3L4_ADDR_CTR); if ((l3l4_addr_ctrl & MGBE_MAC_L3L4_ADDR_CTR_XB) == OSI_NONE) { /* Set cond to 0 to exit loop */ @@ -766,8 +424,8 @@ static int mgbe_poll_for_l3l4crtl(struct osi_core_priv_data *osi_core) osi_core->osd_ops.udelay(MGBE_MAC_XB_WAIT); } } - - return 0; +fail: + return ret; } /** @@ -775,7 +433,7 @@ static int mgbe_poll_for_l3l4crtl(struct osi_core_priv_data *osi_core) * * Algorithm: writes L3_L4 filter register * - * @param[in] base: MGBE virtual base address. + * @param[in] osi_core: osi core priv data structure * @param[in] filter_no: MGBE L3_L4 filter number * @param[in] filter_type: MGBE L3_L4 filter register type. * @param[in] value: MGBE L3_L4 filter register value @@ -785,21 +443,22 @@ static int mgbe_poll_for_l3l4crtl(struct osi_core_priv_data *osi_core) * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_l3l4_filter_write(struct osi_core_priv_data *osi_core, - unsigned int filter_no, - unsigned int filter_type, - unsigned int value) +static nve32_t mgbe_l3l4_filter_write(struct osi_core_priv_data *osi_core, + nveu32_t filter_no, + nveu32_t filter_type, + nveu32_t value) { void *base = osi_core->base; - unsigned int addr = 0; + nveu32_t addr = 0; + nve32_t ret = 0; /* Write MAC_L3_L4_Data register value */ osi_writela(osi_core, value, - (unsigned char *)base + MGBE_MAC_L3L4_DATA); + (nveu8_t *)base + MGBE_MAC_L3L4_DATA); /* Program MAC_L3_L4_Address_Control */ addr = osi_readla(osi_core, - (unsigned char *)base + MGBE_MAC_L3L4_ADDR_CTR); + (nveu8_t *)base + MGBE_MAC_L3L4_ADDR_CTR); /* update filter number */ addr &= ~(MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM); @@ -819,717 +478,119 @@ static int mgbe_l3l4_filter_write(struct osi_core_priv_data *osi_core, /* Write MGBE_MAC_L3L4_ADDR_CTR */ osi_writela(osi_core, addr, - (unsigned char *)base + MGBE_MAC_L3L4_ADDR_CTR); + (nveu8_t *)base + MGBE_MAC_L3L4_ADDR_CTR); /* Wait untile XB bit reset */ if (mgbe_poll_for_l3l4crtl(osi_core) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to write L3_L4_Address_Control\n", filter_type); - return -1; + ret = -1; } - return 0; + return ret; } /** - * @brief mgbe_l3l4_filter_read - L3_L4 filter register read. + * @brief mgbe_config_l3l4_filters - Config L3L4 filters. * - * Algorithm: writes L3_L4 filter register + * @note + * Algorithm: + * - This sequence is used to configure L3L4 filters for SA and DA Port Number matching. + * - Prepare register data using prepare_l3l4_registers(). + * - Write l3l4 reigsters using mgbe_l3l4_filter_write(). + * - Return 0 on success. + * - Return -1 on any register failure. * - * @param[in] base: MGBE virtual base address. - * @param[in] filter_no: MGBE L3_L4 filter number - * @param[in] filter_type: MGBE L3_L4 filter register type. 
- * @param[in] *value: Pointer MGBE L3_L4 filter register value + * @param[in] osi_core: OSI core private data structure. + * @param[in] filter_no_r: filter index + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) * - * @note MAC needs to be out of reset and proper clock configured. + * @note 1) MAC should be init and started. see osi_start_mac() + * 2) osi_core->osd should be populated * * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_l3l4_filter_read(struct osi_core_priv_data *osi_core, - unsigned int filter_no, - unsigned int filter_type, - unsigned int *value) -{ - void *base = osi_core->base; - unsigned int addr = 0; +static nve32_t mgbe_config_l3l4_filters(struct osi_core_priv_data *const osi_core, + nveu32_t filter_no_r, + const struct osi_l3_l4_filter *const l3_l4) +{ +#ifndef OSI_STRIPPED_LIB + nveu32_t l3_addr0_reg = 0; + nveu32_t l3_addr2_reg = 0; + nveu32_t l3_addr3_reg = 0; + nveu32_t l4_addr_reg = 0; +#endif /* !OSI_STRIPPED_LIB */ + nveu32_t l3_addr1_reg = 0; + nveu32_t ctr_reg = 0; + nveu32_t filter_no = filter_no_r & (OSI_MGBE_MAX_L3_L4_FILTER - 1U); + nve32_t err; + nve32_t ret = -1; - /* Program MAC_L3_L4_Address_Control */ - addr = osi_readla(osi_core, - (unsigned char *)base + MGBE_MAC_L3L4_ADDR_CTR); + prepare_l3l4_registers(osi_core, l3_l4, +#ifndef OSI_STRIPPED_LIB + &l3_addr0_reg, + &l3_addr2_reg, + &l3_addr3_reg, + &l4_addr_reg, +#endif /* !OSI_STRIPPED_LIB */ + &l3_addr1_reg, + &ctr_reg); + +#ifndef OSI_STRIPPED_LIB + /* Update l3 ip addr MGBE_MAC_L3_AD0R register */ + err = mgbe_l3l4_filter_write(osi_core, filter_no, MGBE_MAC_L3_AD0R, l3_addr0_reg); + if (err < 0) { + /* Write MGBE_MAC_L3_AD0R fail return error */ + goto exit_func; + } - /* update filter number */ - addr &= ~(MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM); - addr |= ((filter_no << MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM_SHIFT) & - MGBE_MAC_L3L4_ADDR_CTR_IDDR_FNUM); + /* Update l3 ip addr MGBE_MAC_L3_AD2R register */ + err = 
mgbe_l3l4_filter_write(osi_core, filter_no, MGBE_MAC_L3_AD2R, l3_addr2_reg); + if (err < 0) { + /* Write MGBE_MAC_L3_AD2R fail return error */ + goto exit_func; + } - /* update filter type */ - addr &= ~(MGBE_MAC_L3L4_ADDR_CTR_IDDR_FTYPE); - addr |= ((filter_type << MGBE_MAC_L3L4_ADDR_CTR_IDDR_FTYPE_SHIFT) & - MGBE_MAC_L3L4_ADDR_CTR_IDDR_FTYPE); - - /* Set TT field 1 for read */ - addr |= MGBE_MAC_L3L4_ADDR_CTR_TT; - - /* Set XB bit to initiate write */ - addr |= MGBE_MAC_L3L4_ADDR_CTR_XB; - - /* Write MGBE_MAC_L3L4_ADDR_CTR */ - osi_writela(osi_core, addr, - (unsigned char *)base + MGBE_MAC_L3L4_ADDR_CTR); - - /* Wait untile XB bit reset */ - if (mgbe_poll_for_l3l4crtl(osi_core) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "Fail to read L3L4 Address\n", - filter_type); - return -1; - } - - /* Read the MGBE_MAC_L3L4_DATA for filter register data */ - *value = osi_readla(osi_core, - (unsigned char *)base + MGBE_MAC_L3L4_DATA); - return 0; -} - -/** - * @brief mgbe_update_ip4_addr - configure register for IPV4 address filtering - * - * Algorithm: This sequence is used to update IPv4 source/destination - * Address for L3 layer filtering - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] filter_no: filter index - * @param[in] addr: ipv4 address - * @param[in] src_dst_addr_match: 0 - source addr otherwise - dest addr - * - * @note 1) MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_update_ip4_addr(struct osi_core_priv_data *osi_core, - const unsigned int filter_no, - const unsigned char addr[], - const unsigned int src_dst_addr_match) -{ - unsigned int value = 0U; - unsigned int temp = 0U; - int ret = 0; - - if (addr == OSI_NULL) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "invalid address\n", - 0ULL); - return -1; - } - - if (filter_no >= OSI_MGBE_MAX_L3_L4_FILTER) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (unsigned long long)filter_no); - return -1; - } - - /* validate src_dst_addr_match argument */ - if (src_dst_addr_match != OSI_SOURCE_MATCH && - src_dst_addr_match != OSI_INV_MATCH) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid src_dst_addr_match value\n", - src_dst_addr_match); - return -1; - } - - value = addr[3]; - temp = (unsigned int)addr[2] << 8; - value |= temp; - temp = (unsigned int)addr[1] << 16; - value |= temp; - temp = (unsigned int)addr[0] << 24; - value |= temp; - if (src_dst_addr_match == OSI_SOURCE_MATCH) { - ret = mgbe_l3l4_filter_write(osi_core, - filter_no, - MGBE_MAC_L3_AD0R, - value); - } else { - ret = mgbe_l3l4_filter_write(osi_core, - filter_no, - MGBE_MAC_L3_AD1R, - value); - } - - return ret; -} - -/** - * @brief mgbe_update_ip6_addr - add ipv6 address in register - * - * Algorithm: This sequence is used to update IPv6 source/destination - * Address for L3 layer filtering - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] filter_no: filter index - * @param[in] addr: ipv6 adderss - * - * @note 1) MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_update_ip6_addr(struct osi_core_priv_data *osi_core, - const unsigned int filter_no, - const unsigned short addr[]) -{ - unsigned int value = 0U; - unsigned int temp = 0U; - int ret = 0; - - if (addr == OSI_NULL) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "invalid address\n", - 0ULL); - return -1; + /* Update l3 ip addr MGBE_MAC_L3_AD3R register */ + err = mgbe_l3l4_filter_write(osi_core, filter_no, MGBE_MAC_L3_AD3R, l3_addr3_reg); + if (err < 0) { + /* Write MGBE_MAC_L3_AD3R fail return error */ + goto exit_func; } - if (filter_no >= OSI_MGBE_MAX_L3_L4_FILTER) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "invalid filter index for L3/L4 filter\n", - (unsigned long long)filter_no); - return -1; - } - - /* update Bits[31:0] of 128-bit IP addr */ - value = addr[7]; - temp = (unsigned int)addr[6] << 16; - value |= temp; - - ret = mgbe_l3l4_filter_write(osi_core, filter_no, - MGBE_MAC_L3_AD0R, value); - if (ret < 0) { - /* Write MGBE_MAC_L3_AD0R fail return error */ - return ret; + /* Update l4 port register MGBE_MAC_L4_ADDR register */ + err = mgbe_l3l4_filter_write(osi_core, filter_no, MGBE_MAC_L4_ADDR, l4_addr_reg); + if (err < 0) { + /* Write MGBE_MAC_L4_ADDR fail return error */ + goto exit_func; } - /* update Bits[63:32] of 128-bit IP addr */ - value = addr[5]; - temp = (unsigned int)addr[4] << 16; - value |= temp; +#endif /* !OSI_STRIPPED_LIB */ - ret = mgbe_l3l4_filter_write(osi_core, filter_no, - MGBE_MAC_L3_AD1R, value); - if (ret < 0) { + /* Update l3 ip addr MGBE_MAC_L3_AD1R register */ + err = mgbe_l3l4_filter_write(osi_core, filter_no, MGBE_MAC_L3_AD1R, l3_addr1_reg); + if (err < 0) { /* Write MGBE_MAC_L3_AD1R fail return error */ - return ret; + goto exit_func; } - /* update Bits[95:64] of 128-bit IP addr */ - value = addr[3]; - temp = (unsigned int)addr[2] << 16; - value |= temp; - - ret = mgbe_l3l4_filter_write(osi_core, filter_no, - MGBE_MAC_L3_AD2R, value); - if (ret < 0) { - /* Write MGBE_MAC_L3_AD2R fail 
return error */ - return ret; - } - - /* update Bits[127:96] of 128-bit IP addr */ - value = addr[1]; - temp = (unsigned int)addr[0] << 16; - value |= temp; - return mgbe_l3l4_filter_write(osi_core, filter_no, - MGBE_MAC_L3_AD3R, value); -} - -/** - * @brief mgbe_config_l3_l4_filter_enable - register write to enable L3/L4 - * filters. - * - * Algorithm: This routine to enable/disable L3/l4 filter - * - * @param[in] osi_core: OSI core private data structure. - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int mgbe_config_l3_l4_filter_enable( - struct osi_core_priv_data *const osi_core, - unsigned int filter_enb_dis) -{ - unsigned int value = 0U; - void *base = osi_core->base; - - /* validate filter_enb_dis argument */ - if (filter_enb_dis != OSI_ENABLE && filter_enb_dis != OSI_DISABLE) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "Invalid filter_enb_dis value\n", - filter_enb_dis); - return -1; - } - - value = osi_readla(osi_core, (unsigned char *)base + MGBE_MAC_PFR); - value &= ~(MGBE_MAC_PFR_IPFE); - value |= ((filter_enb_dis << MGBE_MAC_PFR_IPFE_SHIFT) & - MGBE_MAC_PFR_IPFE); - osi_writela(osi_core, value, (unsigned char *)base + MGBE_MAC_PFR); - - return 0; -} - -/** - * @brief mgbe_update_l4_port_no -program source port no - * - * Algorithm: sequence is used to update Source Port Number for - * L4(TCP/UDP) layer filtering. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] filter_no: filter index - * @param[in] port_no: port number - * @param[in] src_dst_port_match: 0 - source port, otherwise - dest port - * - * @note 1) MAC should be init and started. see osi_start_mac() - * 2) osi_core->osd should be populated - * 3) DCS bits should be enabled in RXQ to DMA mapping register - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_update_l4_port_no(struct osi_core_priv_data *osi_core, - unsigned int filter_no, - unsigned short port_no, - unsigned int src_dst_port_match) -{ - unsigned int value = 0U; - unsigned int temp = 0U; - int ret = 0; - - if (filter_no >= OSI_MGBE_MAX_L3_L4_FILTER) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (unsigned long long)filter_no); - return -1; - } - - ret = mgbe_l3l4_filter_read(osi_core, filter_no, - MGBE_MAC_L4_ADDR, &value); - if (ret < 0) { - /* Read MGBE_MAC_L4_ADDR fail return error */ - return ret; - } - - if (src_dst_port_match == OSI_SOURCE_MATCH) { - value &= ~MGBE_MAC_L4_ADDR_SP_MASK; - value |= ((unsigned int)port_no & MGBE_MAC_L4_ADDR_SP_MASK); - } else { - value &= ~MGBE_MAC_L4_ADDR_DP_MASK; - temp = port_no; - value |= ((temp << MGBE_MAC_L4_ADDR_DP_SHIFT) & - MGBE_MAC_L4_ADDR_DP_MASK); - } - - return mgbe_l3l4_filter_write(osi_core, filter_no, - MGBE_MAC_L4_ADDR, value); -} - -/** - * @brief mgbe_set_dcs - check and update dma routing register - * - * Algorithm: Check for request for DCS_enable as well as validate chan - * number and dcs_enable is set. After validation, this sequence is used - * to configure L3((IPv4/IPv6) filters for address matching. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] value: unsigned int value for caller - * @param[in] dma_routing_enable: filter based dma routing enable(1) - * @param[in] dma_chan: dma channel for routing based on filter - * - * @note 1) MAC IP should be out of reset and need to be initialized - * as the requirements. 
- * 2) DCS bit of RxQ should be enabled for dynamic channel selection - * in filter support - * - * @retval updated unsigned int value param - */ -static inline unsigned int mgbe_set_dcs(struct osi_core_priv_data *osi_core, - unsigned int value, - unsigned int dma_routing_enable, - unsigned int dma_chan) -{ - if ((dma_routing_enable == OSI_ENABLE) && (dma_chan < - OSI_MGBE_MAX_NUM_CHANS) && (osi_core->dcs_en == - OSI_ENABLE)) { - value |= ((dma_routing_enable << - MGBE_MAC_L3L4_CTR_DMCHEN0_SHIFT) & - MGBE_MAC_L3L4_CTR_DMCHEN0); - value |= ((dma_chan << - MGBE_MAC_L3L4_CTR_DMCHN0_SHIFT) & - MGBE_MAC_L3L4_CTR_DMCHN0); - } - - return value; -} - -/** - * @brief mgbe_helper_l3l4_bitmask - helper function to set L3L4 - * bitmask. - * - * Algorithm: set bit corresponding to L3l4 filter index - * - * @param[in] bitmask: bit mask OSI core private data structure. - * @param[in] filter_no: filter index - * @param[in] value: 0 - disable otherwise - l3/l4 filter enabled - * - * @note 1) MAC should be init and started. see osi_start_mac() - */ -static inline void mgbe_helper_l3l4_bitmask(unsigned int *bitmask, - unsigned int filter_no, - unsigned int value) -{ - unsigned int temp; - - temp = OSI_ENABLE; - temp = temp << filter_no; - - /* check against all bit fields for L3L4 filter enable */ - if ((value & MGBE_MAC_L3L4_CTRL_ALL) != OSI_DISABLE) { - /* Set bit mask for index */ - *bitmask |= temp; - } else { - /* Reset bit mask for index */ - *bitmask &= ~temp; - } -} - -/** - * @brief mgbe_config_l3_filters - config L3 filters. - * - * Algorithm: Check for DCS_enable as well as validate channel - * number and if dcs_enable is set. After validation, code flow - * is used to configure L3((IPv4/IPv6) filters resister - * for address matching. - * - * @param[in] osi_core: OSI core private data structure. 
- * @param[in] filter_no: filter index - * @param[in] enb_dis: 1 - enable otherwise - disable L3 filter - * @param[in] ipv4_ipv6_match: 1 - IPv6, otherwise - IPv4 - * @param[in] src_dst_addr_match: 0 - source, otherwise - destination - * @param[in] perfect_inverse_match: normal match(0) or inverse map(1) - * @param[in] dma_routing_enable: filter based dma routing enable(1) - * @param[in] dma_chan: dma channel for routing based on filter - * - * @note 1) MAC should be init and started. see osi_start_mac() - * 2) osi_core->osd should be populated - * 3) DCS bit of RxQ should be enabled for dynamic channel selection - * in filter support - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int mgbe_config_l3_filters(struct osi_core_priv_data *osi_core, - unsigned int filter_no, - unsigned int enb_dis, - unsigned int ipv4_ipv6_match, - unsigned int src_dst_addr_match, - unsigned int perfect_inverse_match, - unsigned int dma_routing_enable, - unsigned int dma_chan) -{ - unsigned int value = 0U; - int ret = 0; - - if (filter_no >= OSI_MGBE_MAX_L3_L4_FILTER) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (unsigned long long)filter_no); - return -1; - } - /* validate enb_dis argument */ - if (enb_dis != OSI_ENABLE && enb_dis != OSI_DISABLE) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid filter_enb_dis value\n", - enb_dis); - return -1; - } - /* validate ipv4_ipv6_match argument */ - if (ipv4_ipv6_match != OSI_IPV6_MATCH && - ipv4_ipv6_match != OSI_IPV4_MATCH) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid ipv4_ipv6_match value\n", - ipv4_ipv6_match); - return -1; - } - /* validate src_dst_addr_match argument */ - if (src_dst_addr_match != OSI_SOURCE_MATCH && - src_dst_addr_match != OSI_INV_MATCH) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid src_dst_addr_match value\n", - src_dst_addr_match); - return -1; - } - /* validate perfect_inverse_match 
argument */ - if (perfect_inverse_match != OSI_ENABLE && - perfect_inverse_match != OSI_DISABLE) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid perfect_inverse_match value\n", - perfect_inverse_match); - return -1; - } - if ((dma_routing_enable == OSI_ENABLE) && - (dma_chan > OSI_MGBE_MAX_NUM_CHANS - 1U)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "Wrong DMA channel\n", - (unsigned long long)dma_chan); - return -1; - } - - ret = mgbe_l3l4_filter_read(osi_core, filter_no, - MGBE_MAC_L3L4_CTR, &value); - if (ret < 0) { - /* MGBE_MAC_L3L4_CTR read fail return here */ - return ret; - } - - value &= ~MGBE_MAC_L3L4_CTR_L3PEN0; - value |= (ipv4_ipv6_match & MGBE_MAC_L3L4_CTR_L3PEN0); - - /* For IPv6 either SA/DA can be checked not both */ - if (ipv4_ipv6_match == OSI_IPV6_MATCH) { - if (enb_dis == OSI_ENABLE) { - if (src_dst_addr_match == OSI_SOURCE_MATCH) { - /* Enable L3 filters for IPv6 SOURCE addr - * matching - */ - value &= ~MGBE_MAC_L3_IP6_CTRL_CLEAR; - value |= ((MGBE_MAC_L3L4_CTR_L3SAM0 | - perfect_inverse_match << - MGBE_MAC_L3L4_CTR_L3SAIM0_SHIFT) & - ((MGBE_MAC_L3L4_CTR_L3SAM0 | - MGBE_MAC_L3L4_CTR_L3SAIM0))); - value |= mgbe_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - - } else { - /* Enable L3 filters for IPv6 DESTINATION addr - * matching - */ - value &= ~MGBE_MAC_L3_IP6_CTRL_CLEAR; - value |= ((MGBE_MAC_L3L4_CTR_L3DAM0 | - perfect_inverse_match << - MGBE_MAC_L3L4_CTR_L3DAIM0_SHIFT) & - ((MGBE_MAC_L3L4_CTR_L3DAM0 | - MGBE_MAC_L3L4_CTR_L3DAIM0))); - value |= mgbe_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - } - } else { - /* Disable L3 filters for IPv6 SOURCE/DESTINATION addr - * matching - */ - value &= ~(MGBE_MAC_L3_IP6_CTRL_CLEAR | - MGBE_MAC_L3L4_CTR_L3PEN0); - } - } else { - if (src_dst_addr_match == OSI_SOURCE_MATCH) { - if (enb_dis == OSI_ENABLE) { - /* Enable L3 filters for IPv4 SOURCE addr - * matching - */ - value &= ~MGBE_MAC_L3_IP4_SA_CTRL_CLEAR; - value |= 
((MGBE_MAC_L3L4_CTR_L3SAM0 | - perfect_inverse_match << - MGBE_MAC_L3L4_CTR_L3SAIM0_SHIFT) & - ((MGBE_MAC_L3L4_CTR_L3SAM0 | - MGBE_MAC_L3L4_CTR_L3SAIM0))); - value |= mgbe_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - } else { - /* Disable L3 filters for IPv4 SOURCE addr - * matching - */ - value &= ~MGBE_MAC_L3_IP4_SA_CTRL_CLEAR; - } - } else { - if (enb_dis == OSI_ENABLE) { - /* Enable L3 filters for IPv4 DESTINATION addr - * matching - */ - value &= ~MGBE_MAC_L3_IP4_DA_CTRL_CLEAR; - value |= ((MGBE_MAC_L3L4_CTR_L3DAM0 | - perfect_inverse_match << - MGBE_MAC_L3L4_CTR_L3DAIM0_SHIFT) & - ((MGBE_MAC_L3L4_CTR_L3DAM0 | - MGBE_MAC_L3L4_CTR_L3DAIM0))); - value |= mgbe_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - } else { - /* Disable L3 filters for IPv4 DESTINATION addr - * matching - */ - value &= ~MGBE_MAC_L3_IP4_DA_CTRL_CLEAR; - } - } - } - - ret = mgbe_l3l4_filter_write(osi_core, filter_no, - MGBE_MAC_L3L4_CTR, value); - if (ret < 0) { + /* Write CTR register */ + err = mgbe_l3l4_filter_write(osi_core, filter_no, MGBE_MAC_L3L4_CTR, ctr_reg); + if (err < 0) { /* Write MGBE_MAC_L3L4_CTR fail return error */ - return ret; - } - - /* Set bit corresponding to filter index if value is non-zero */ - mgbe_helper_l3l4_bitmask(&osi_core->l3l4_filter_bitmask, - filter_no, value); - - return ret; -} - -/** - * @brief mgbe_config_l4_filters - Config L4 filters. - * - * Algorithm: This sequence is used to configure L4(TCP/UDP) filters for - * SA and DA Port Number matching - * - * @param[in] osi_core: OSI core private data structure. 
- * @param[in] filter_no: filter index - * @param[in] enb_dis: 1 - enable, otherwise - disable L4 filter - * @param[in] tcp_udp_match: 1 - udp, 0 - tcp - * @param[in] src_dst_port_match: 0 - source port, otherwise - dest port - * @param[in] perfect_inverse_match: normal match(0) or inverse map(1) - * @param[in] dma_routing_enable: filter based dma routing enable(1) - * @param[in] dma_chan: dma channel for routing based on filter - * - * @note 1) MAC should be init and started. see osi_start_mac() - * 2) osi_core->osd should be populated - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int mgbe_config_l4_filters(struct osi_core_priv_data *osi_core, - unsigned int filter_no, - unsigned int enb_dis, - unsigned int tcp_udp_match, - unsigned int src_dst_port_match, - unsigned int perfect_inverse_match, - unsigned int dma_routing_enable, - unsigned int dma_chan) -{ - unsigned int value = 0U; - int ret = 0; - - if (filter_no >= OSI_MGBE_MAX_L3_L4_FILTER) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "invalid filter index for L3/L4 filter\n", - (unsigned long long)filter_no); - return -1; - } - /* validate enb_dis argument */ - if (enb_dis != OSI_ENABLE && enb_dis != OSI_DISABLE) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid filter_enb_dis value\n", - enb_dis); - return -1; - } - /* validate tcp_udp_match argument */ - if (tcp_udp_match != OSI_ENABLE && tcp_udp_match != OSI_DISABLE) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid tcp_udp_match value\n", - tcp_udp_match); - return -1; - } - /* validate src_dst_port_match argument */ - if (src_dst_port_match != OSI_SOURCE_MATCH && - src_dst_port_match != OSI_INV_MATCH) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid src_dst_port_match value\n", - src_dst_port_match); - return -1; - } - /* validate perfect_inverse_match argument */ - if (perfect_inverse_match != OSI_ENABLE && - perfect_inverse_match != OSI_DISABLE) { - 
OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Invalid perfect_inverse_match value\n", - perfect_inverse_match); - return -1; - } - if ((dma_routing_enable == OSI_ENABLE) && - (dma_chan > OSI_MGBE_MAX_NUM_CHANS - 1U)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, - "Wrong DMA channel\n", - (unsigned int)dma_chan); - return -1; - } - - ret = mgbe_l3l4_filter_read(osi_core, filter_no, - MGBE_MAC_L3L4_CTR, &value); - if (ret < 0) { - /* MGBE_MAC_L3L4_CTR read fail return here */ - return ret; - } - - value &= ~MGBE_MAC_L3L4_CTR_L4PEN0; - value |= ((tcp_udp_match << 16) & MGBE_MAC_L3L4_CTR_L4PEN0); - - if (src_dst_port_match == OSI_SOURCE_MATCH) { - if (enb_dis == OSI_ENABLE) { - /* Enable L4 filters for SOURCE Port No matching */ - value &= ~MGBE_MAC_L4_SP_CTRL_CLEAR; - value |= ((MGBE_MAC_L3L4_CTR_L4SPM0 | - perfect_inverse_match << - MGBE_MAC_L3L4_CTR_L4SPIM0_SHIFT) & - (MGBE_MAC_L3L4_CTR_L4SPM0 | - MGBE_MAC_L3L4_CTR_L4SPIM0)); - value |= mgbe_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - } else { - /* Disable L4 filters for SOURCE Port No matching */ - value &= ~MGBE_MAC_L4_SP_CTRL_CLEAR; - } - } else { - if (enb_dis == OSI_ENABLE) { - /* Enable L4 filters for DESTINATION port No - * matching - */ - value &= ~MGBE_MAC_L4_DP_CTRL_CLEAR; - value |= ((MGBE_MAC_L3L4_CTR_L4DPM0 | - perfect_inverse_match << - MGBE_MAC_L3L4_CTR_L4DPIM0_SHIFT) & - (MGBE_MAC_L3L4_CTR_L4DPM0 | - MGBE_MAC_L3L4_CTR_L4DPIM0)); - value |= mgbe_set_dcs(osi_core, value, - dma_routing_enable, - dma_chan); - } else { - /* Disable L4 filters for DESTINATION port No - * matching - */ - value &= ~MGBE_MAC_L4_DP_CTRL_CLEAR; - } + goto exit_func; } - ret = mgbe_l3l4_filter_write(osi_core, filter_no, - MGBE_MAC_L3L4_CTR, value); - if (ret < 0) { - /* Write MGBE_MAC_L3L4_CTR fail return error */ - return ret; - } + /* success */ + ret = 0; - /* Set bit corresponding to filter index if value is non-zero */ - mgbe_helper_l3l4_bitmask(&osi_core->l3l4_filter_bitmask, - filter_no, 
value); +exit_func: return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief mgbe_config_vlan_filter_reg - config vlan filter register * @@ -1547,13 +608,13 @@ static int mgbe_config_l4_filters(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_config_vlan_filtering(struct osi_core_priv_data *osi_core, - unsigned int filter_enb_dis, - unsigned int perfect_hash_filtering, - unsigned int perfect_inverse_match) +static nve32_t mgbe_config_vlan_filtering(struct osi_core_priv_data *osi_core, + const nveu32_t filter_enb_dis, + const nveu32_t perfect_hash_filtering, + const nveu32_t perfect_inverse_match) { - unsigned int value; - unsigned char *base = osi_core->base; + nveu32_t value; + nveu8_t *base = osi_core->base; /* validate perfect_inverse_match argument */ if (perfect_hash_filtering == OSI_HASH_FILTER_MODE) { @@ -1570,7 +631,7 @@ static int mgbe_config_vlan_filtering(struct osi_core_priv_data *osi_core, } /* validate filter_enb_dis argument */ - if (filter_enb_dis != OSI_ENABLE && filter_enb_dis != OSI_DISABLE) { + if ((filter_enb_dis != OSI_ENABLE) && (filter_enb_dis != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid filter_enb_dis value\n", filter_enb_dis); @@ -1578,8 +639,8 @@ static int mgbe_config_vlan_filtering(struct osi_core_priv_data *osi_core, } /* validate perfect_inverse_match argument */ - if (perfect_inverse_match != OSI_ENABLE && - perfect_inverse_match != OSI_DISABLE) { + if ((perfect_inverse_match != OSI_ENABLE) && + (perfect_inverse_match != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid perfect_inverse_match value\n", perfect_inverse_match); @@ -1618,13 +679,13 @@ static int mgbe_config_vlan_filtering(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_config_ptp_rxq(struct osi_core_priv_data *const osi_core, - const unsigned int rxq_idx, - const unsigned int enable) +static nve32_t mgbe_config_ptp_rxq(struct osi_core_priv_data *const osi_core, + const nveu32_t rxq_idx, + const nveu32_t enable) { - unsigned char *base = osi_core->base; - unsigned int value = 0U; - unsigned int i = 0U; + nveu8_t *base = osi_core->base; + nveu32_t value = 0U; + nveu32_t i = 0U; /* Validate the RX queue index argument */ if (rxq_idx >= OSI_MGBE_MAX_NUM_QUEUES) { @@ -1635,7 +696,7 @@ static int mgbe_config_ptp_rxq(struct osi_core_priv_data *const osi_core, } /* Validate enable argument */ - if (enable != OSI_ENABLE && enable != OSI_DISABLE) { + if ((enable != OSI_ENABLE) && (enable != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid enable input\n", enable); @@ -1686,60 +747,6 @@ static int mgbe_config_ptp_rxq(struct osi_core_priv_data *const osi_core, return 0; } -/** - * @brief mgbe_flush_mtl_tx_queue - Flush MTL Tx queue - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] qinx: MTL queue index. - * - * @note 1) MAC should out of reset and clocks enabled. - * 2) hw core initialized. see osi_hw_core_init(). - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t mgbe_flush_mtl_tx_queue( - struct osi_core_priv_data *const osi_core, - const nveu32_t qinx) -{ - void *addr = osi_core->base; - nveu32_t retry = 1000; - nveu32_t count; - nveu32_t value; - nve32_t cond = 1; - - if (qinx >= OSI_MGBE_MAX_NUM_QUEUES) { - return -1; - } - - /* Read Tx Q Operating Mode Register and flush TxQ */ - value = osi_readla(osi_core, (nveu8_t *)addr + - MGBE_MTL_CHX_TX_OP_MODE(qinx)); - value |= MGBE_MTL_QTOMR_FTQ; - osi_writela(osi_core, value, (nveu8_t *)addr + - MGBE_MTL_CHX_TX_OP_MODE(qinx)); - - /* Poll Until FTQ bit resets for Successful Tx Q flush */ - count = 0; - while (cond == 1) { - if (count > retry) { - return -1; - } - - count++; - - value = osi_readla(osi_core, (nveu8_t *)addr + - MGBE_MTL_CHX_TX_OP_MODE(qinx)); - if ((value & MGBE_MTL_QTOMR_FTQ_LPOS) == OSI_NONE) { - cond = 0; - } else { - osi_core->osd_ops.msleep(1); - } - } - - return 0; -} - /** * @brief mgbe_config_mac_loopback - Configure MAC to support loopback * @@ -1752,19 +759,19 @@ static nve32_t mgbe_flush_mtl_tx_queue( * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_config_mac_loopback(struct osi_core_priv_data *const osi_core, - unsigned int lb_mode) +static nve32_t mgbe_config_mac_loopback(struct osi_core_priv_data *const osi_core, + nveu32_t lb_mode) { - unsigned int value; + nveu32_t value; void *addr = osi_core->base; /* don't allow only if loopback mode is other than 0 or 1 */ - if (lb_mode != OSI_ENABLE && lb_mode != OSI_DISABLE) { + if ((lb_mode != OSI_ENABLE) && (lb_mode != OSI_DISABLE)) { return -1; } /* Read MAC Configuration Register */ - value = osi_readla(osi_core, (unsigned char *)addr + MGBE_MAC_RMCR); + value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_RMCR); if (lb_mode == OSI_ENABLE) { /* Enable Loopback Mode */ value |= MGBE_MAC_RMCR_LM; @@ -1772,7 +779,7 @@ static int mgbe_config_mac_loopback(struct osi_core_priv_data *const osi_core, value &= ~MGBE_MAC_RMCR_LM; } - osi_writela(osi_core, value, (unsigned char *)addr + MGBE_MAC_RMCR); + osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_RMCR); return 0; } @@ -1797,77 +804,39 @@ static int mgbe_config_mac_loopback(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_config_arp_offload(struct osi_core_priv_data *const osi_core, - const unsigned int enable, - const unsigned char *ip_addr) +static nve32_t mgbe_config_arp_offload(struct osi_core_priv_data *const osi_core, + const nveu32_t enable, + const nveu8_t *ip_addr) { - unsigned int mac_rmcr; - unsigned int val; + nveu32_t mac_rmcr; + nveu32_t val; void *addr = osi_core->base; - if (enable != OSI_ENABLE && enable != OSI_DISABLE) { + if ((enable != OSI_ENABLE) && (enable != OSI_DISABLE)) { return -1; } - mac_rmcr = osi_readla(osi_core, (unsigned char *)addr + MGBE_MAC_RMCR); + mac_rmcr = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_RMCR); if (enable == OSI_ENABLE) { - val = (((unsigned int)ip_addr[0]) << 24) | - (((unsigned int)ip_addr[1]) << 16) | - (((unsigned int)ip_addr[2]) << 8) | - (((unsigned int)ip_addr[3])); + val = (((nveu32_t)ip_addr[0]) << 24) | + (((nveu32_t)ip_addr[1]) << 16) | + (((nveu32_t)ip_addr[2]) << 8) | + (((nveu32_t)ip_addr[3])); osi_writela(osi_core, val, - (unsigned char *)addr + MGBE_MAC_ARPPA); + (nveu8_t *)addr + MGBE_MAC_ARPPA); mac_rmcr |= MGBE_MAC_RMCR_ARPEN; } else { mac_rmcr &= ~MGBE_MAC_RMCR_ARPEN; } - osi_writela(osi_core, mac_rmcr, (unsigned char *)addr + MGBE_MAC_RMCR); - - return 0; -} - -/** - * @brief mgbe_config_rxcsum_offload - Enable/Disale rx checksum offload in HW - * - * Algorithm: - * 1) Read the MAC configuration register. - * 2) Enable the IP checksum offload engine COE in MAC receiver. - * 3) Update the MAC configuration register. - * - * @param[in] addr: MGBE virtual base address. - * @param[in] enabled: Flag to indicate feature is to be enabled/disabled. - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_config_rxcsum_offload( - struct osi_core_priv_data *const osi_core, - unsigned int enabled) -{ - void *addr = osi_core->base; - unsigned int mac_rmcr; - - if (enabled != OSI_ENABLE && enabled != OSI_DISABLE) { - return -1; - } - - mac_rmcr = osi_readla(osi_core, (unsigned char *)addr + MGBE_MAC_RMCR); - if (enabled == OSI_ENABLE) { - mac_rmcr |= MGBE_MAC_RMCR_IPC; - } else { - mac_rmcr &= ~MGBE_MAC_RMCR_IPC; - } - - osi_writela(osi_core, mac_rmcr, (unsigned char *)addr + MGBE_MAC_RMCR); + osi_writela(osi_core, mac_rmcr, (nveu8_t *)addr + MGBE_MAC_RMCR); return 0; } +#endif /* !OSI_STRIPPED_LIB */ /** * @brief mgbe_config_frp - Enable/Disale RX Flexible Receive Parser in HW @@ -1885,18 +854,19 @@ static int mgbe_config_rxcsum_offload( * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_config_frp(struct osi_core_priv_data *const osi_core, - const unsigned int enabled) +static nve32_t mgbe_config_frp(struct osi_core_priv_data *const osi_core, + const nveu32_t enabled) { - unsigned char *base = osi_core->base; - unsigned int op_mode = 0U, val = 0U; - int ret = -1; + nveu8_t *base = osi_core->base; + nveu32_t op_mode = 0U, val = 0U; + nve32_t ret = 0; - if (enabled != OSI_ENABLE && enabled != OSI_DISABLE) { + if ((enabled != OSI_ENABLE) && (enabled != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid enable input\n", enabled); - return -1; + ret = -1; + goto done; } op_mode = osi_readla(osi_core, base + MGBE_MTL_OP_MODE); @@ -1917,7 +887,8 @@ static int mgbe_config_frp(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to enable FRP\n", val); - return -1; + ret = -1; + goto done; } /* Enable FRP Interrupts in MTL_RXP_Interrupt_Control_Status */ @@ -1944,7 +915,8 @@ static int mgbe_config_frp(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to disable FRP\n", val); - return -1; + ret = -1; + goto done; } /* Disable 
FRP Interrupts in MTL_RXP_Interrupt_Control_Status */ @@ -1956,7 +928,8 @@ static int mgbe_config_frp(struct osi_core_priv_data *const osi_core, osi_writela(osi_core, val, base + MGBE_MTL_RXP_INTR_CS); } - return 0; +done: + return ret; } /** @@ -1976,20 +949,21 @@ static int mgbe_config_frp(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_frp_write(struct osi_core_priv_data *osi_core, - unsigned int acc_sel, - unsigned int addr, - unsigned int data) +static nve32_t mgbe_frp_write(struct osi_core_priv_data *osi_core, + nveu32_t acc_sel, + nveu32_t addr, + nveu32_t data) { - int ret = 0; - unsigned char *base = osi_core->base; - unsigned int val = 0U; + nve32_t ret = 0; + nveu8_t *base = osi_core->base; + nveu32_t val = 0U; - if (acc_sel != OSI_ENABLE && acc_sel != OSI_DISABLE) { + if ((acc_sel != OSI_ENABLE) && (acc_sel != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid acc_sel argment\n", acc_sel); - return -1; + ret = -1; + goto done; } /* Wait for ready */ @@ -2004,7 +978,8 @@ static int mgbe_frp_write(struct osi_core_priv_data *osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to write\n", val); - return -1; + ret = -1; + goto done; } /* Write data into MTL_RXP_Indirect_Acc_Data */ @@ -2041,9 +1016,10 @@ static int mgbe_frp_write(struct osi_core_priv_data *osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Fail to write\n", val); - return -1; + ret = -1; } +done: return ret; } @@ -2061,19 +1037,20 @@ static int mgbe_frp_write(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, - const unsigned int pos, - struct osi_core_frp_data *const data) +static nve32_t mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, + const nveu32_t pos, + struct osi_core_frp_data *const data) { - unsigned int val = 0U, tmp = 0U; - int ret = -1; + nveu32_t val = 0U, tmp = 0U; + nve32_t ret = -1; /* Validate pos value */ if (pos >= OSI_FRP_MAX_ENTRY) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid FRP table entry\n", pos); - return -1; + ret = -1; + goto done; } /** Write Match Data into IE0 **/ @@ -2081,7 +1058,8 @@ static int mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = mgbe_frp_write(osi_core, OSI_DISABLE, MGBE_MTL_FRP_IE0(pos), val); if (ret < 0) { /* Match Data Write fail */ - return -1; + ret = -1; + goto done; } /** Write Match Enable into IE1 **/ @@ -2089,7 +1067,8 @@ static int mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = mgbe_frp_write(osi_core, OSI_DISABLE, MGBE_MTL_FRP_IE1(pos), val); if (ret < 0) { /* Match Enable Write fail */ - return -1; + ret = -1; + goto done; } /** Write AF, RF, IM, NIC, FO and OKI into IE2 **/ @@ -2119,7 +1098,8 @@ static int mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = mgbe_frp_write(osi_core, OSI_DISABLE, MGBE_MTL_FRP_IE2(pos), val); if (ret < 0) { /* FRP IE2 Write fail */ - return -1; + ret = -1; + goto done; } /** Write DCH into IE3 **/ @@ -2127,9 +1107,10 @@ static int mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, ret = mgbe_frp_write(osi_core, OSI_DISABLE, MGBE_MTL_FRP_IE3(pos), val); if (ret < 0) { /* DCH Write fail */ - return -1; + ret = -1; } +done: return ret; } @@ -2138,26 +1119,28 @@ static int mgbe_update_frp_entry(struct osi_core_priv_data *const osi_core, * * Algorithm: * - * @param[in] addr: MGBE virtual base address. - * @param[in] enabled: Flag to indicate feature is to be enabled/disabled. 
+ * @param[in] osi_core: osi core priv data structure + * @param[in] nve: Number of Valid Entries. * * @note MAC should be init and started. see osi_start_mac() * * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_update_frp_nve(struct osi_core_priv_data *const osi_core, - const unsigned int nve) +static nve32_t mgbe_update_frp_nve(struct osi_core_priv_data *const osi_core, + const nveu32_t nve) { - unsigned int val; - unsigned char *base = osi_core->base; + nveu32_t val; + nveu8_t *base = osi_core->base; + nve32_t ret; /* Validate the NVE value */ if (nve >= OSI_FRP_MAX_ENTRY) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid NVE value\n", nve); - return -1; + ret = -1; + goto done; } /* Update NVE and NPE in MTL_RXP_Control_Status register */ @@ -2169,100 +1152,10 @@ static int mgbe_update_frp_nve(struct osi_core_priv_data *const osi_core, val |= ((nve << MGBE_MTL_RXP_CS_NPE_SHIFT) & MGBE_MTL_RXP_CS_NPE); osi_writela(osi_core, val, base + MGBE_MTL_RXP_CS); - return 0; -} + ret = 0; -/** - * @brief update_rfa_rfd - Update RFD and RSA values - * - * Algorithm: Calulates and stores the RSD (Threshold for Dectivating - * Flow control) and RSA (Threshold for Activating Flow Control) values - * based on the Rx FIFO size - * - * @param[in] rx_fifo: Rx FIFO size. 
- * @param[in] value: Stores RFD and RSA values - */ -static void update_rfa_rfd(unsigned int rx_fifo, unsigned int *value) -{ - switch (rx_fifo) { - case MGBE_21K: - /* Update RFD */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_18_K << - MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case MGBE_24K: - /* Update RFD */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_21_K << - MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case MGBE_27K: - /* Update RFD */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_24_K << - MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case MGBE_32K: - /* Update RFD */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_29_K << - MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case MGBE_38K: - case MGBE_48K: - case MGBE_64K: - case MGBE_96K: - case MGBE_192K: - /* Update RFD */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_32_K << - MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - break; - case MGBE_19K: 
- default: - /* Update RFD */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - *value |= (FULL_MINUS_4_K << - MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFD_MASK; - /* Update RFA */ - *value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - *value |= (FULL_MINUS_16_K << - MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & - MGBE_MTL_RXQ_OP_MODE_RFA_MASK; - break; - } +done: + return ret; } /** @@ -2278,21 +1171,49 @@ static void update_rfa_rfd(unsigned int rx_fifo, unsigned int *value) * 6) Enable Rx Queues * 7) Enable TX Underflow Interrupt for MTL Q * - * @param[in] qinx: Queue number that need to be configured. - * @param[in] osi_core: OSI core private data. - * @param[in] tx_fifo: MTL TX queue size for a MTL queue. - * @param[in] rx_fifo: MTL RX queue size for a MTL queue. + * @param[in] osi_core: OSI core private data structure. + * @param[in] hw_qinx: Queue number that need to be configured. * * @note MAC has to be out of reset. * * @retval 0 on success * @retval -1 on failure. */ -static nve32_t mgbe_configure_mtl_queue(nveu32_t qinx, - struct osi_core_priv_data *osi_core, - nveu32_t tx_fifo, - nveu32_t rx_fifo) -{ +static nve32_t mgbe_configure_mtl_queue(struct osi_core_priv_data *osi_core, + nveu32_t hw_qinx) +{ + nveu32_t qinx = hw_qinx & 0xFU; + /* + * Total available Rx queue size is 192KB. 
+ * Below is the destribution among the Rx queueu - + * Q0 - 160KB + * Q1 to Q8 - 2KB each = 8 * 2KB = 16KB + * Q9 - 16KB (MVBCQ) + * + * Formula to calculate the value to be programmed in HW + * + * vale= (size in KB / 256) - 1U + */ + const nveu32_t rx_fifo_sz[OSI_MGBE_MAX_NUM_QUEUES] = { + FIFO_SZ(160U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), + FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(2U), FIFO_SZ(16U), + }; + const nveu32_t tx_fifo_sz[OSI_MGBE_MAX_NUM_QUEUES] = { + TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, + TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, TX_FIFO_SZ, + }; + const nveu32_t rfd_rfa[OSI_MGBE_MAX_NUM_QUEUES] = { + FULL_MINUS_32_K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + FULL_MINUS_1_5K, + }; nveu32_t value = 0; nve32_t ret = 0; @@ -2315,25 +1236,33 @@ static nve32_t mgbe_configure_mtl_queue(nveu32_t qinx, * Setting related to CBS will come here for TC. 
* default: 0x0 SP */ - ret = mgbe_flush_mtl_tx_queue(osi_core, qinx); + ret = hw_flush_mtl_tx_queue(osi_core, qinx); if (ret < 0) { - return ret; + goto fail; } - value = (tx_fifo << MGBE_MTL_TXQ_SIZE_SHIFT); + if (osi_unlikely((qinx >= OSI_MGBE_MAX_NUM_QUEUES) || + (osi_core->tc[qinx] >= OSI_MAX_TC_NUM))) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Incorrect queues/TC number\n", 0ULL); + ret = -1; + goto fail; + } + + value = (tx_fifo_sz[qinx] << MGBE_MTL_TXQ_SIZE_SHIFT); /* Enable Store and Forward mode */ value |= MGBE_MTL_TSF; /*TTC not applicable for TX*/ /* Enable TxQ */ value |= MGBE_MTL_TXQEN; value |= (osi_core->tc[qinx] << MGBE_MTL_CHX_TX_OP_MODE_Q2TC_SH); - osi_writela(osi_core, value, (unsigned char *) + osi_writela(osi_core, value, (nveu8_t *) osi_core->base + MGBE_MTL_CHX_TX_OP_MODE(qinx)); /* read RX Q0 Operating Mode Register */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_CHX_RX_OP_MODE(qinx)); - value |= (rx_fifo << MGBE_MTL_RXQ_SIZE_SHIFT); + value |= (rx_fifo_sz[qinx] << MGBE_MTL_RXQ_SIZE_SHIFT); /* Enable Store and Forward mode */ value |= MGBE_MTL_RSF; /* Enable HW flow control */ @@ -2346,18 +1275,30 @@ static nve32_t mgbe_configure_mtl_queue(nveu32_t qinx, * RFA: Threshold for Activating Flow Control * RFD: Threshold for Deactivating Flow Control */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_FLOW_CTRL(qinx)); - update_rfa_rfd(rx_fifo, &value); - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + value &= ~MGBE_MTL_RXQ_OP_MODE_RFD_MASK; + value &= ~MGBE_MTL_RXQ_OP_MODE_RFA_MASK; + value |= (rfd_rfa[qinx] << MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT) & MGBE_MTL_RXQ_OP_MODE_RFD_MASK; + value |= (rfd_rfa[qinx] << MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT) & MGBE_MTL_RXQ_OP_MODE_RFA_MASK; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_FLOW_CTRL(qinx)); - /* Transmit Queue weight */ + /* Transmit Queue 
weight, all TX weights are equal */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_QW(qinx)); - value |= (MGBE_MTL_TCQ_QW_ISCQW + qinx); + value |= MGBE_MTL_TCQ_QW_ISCQW; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_QW(qinx)); + + /* Default ETS tx selection algo */ + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MGBE_MTL_TCQ_ETS_CR(osi_core->tc[qinx])); + value &= ~MGBE_MTL_TCQ_ETS_CR_AVALG; + value |= OSI_MGBE_TXQ_AVALG_ETS; + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + MGBE_MTL_TCQ_ETS_CR(osi_core->tc[qinx])); + /* Enable Rx Queue Control */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MAC_RQC0R); @@ -2365,16 +1306,11 @@ static nve32_t mgbe_configure_mtl_queue(nveu32_t qinx, (MGBE_MAC_RXQC0_RXQEN_SHIFT(qinx))); osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MAC_RQC0R); - - /* Enable TX Underflow Interrupt for MTL Q */ - value = osi_readl((unsigned char *)osi_core->base + - MGBE_MTL_QINT_ENABLE(qinx)); - value |= MGBE_MTL_QINT_TXUIE; - osi_writel(value, (unsigned char *)osi_core->base + - MGBE_MTL_QINT_ENABLE(qinx)); - return 0; +fail: + return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief mgbe_rss_write_reg - Write into RSS registers * @@ -2390,16 +1326,16 @@ static nve32_t mgbe_configure_mtl_queue(nveu32_t qinx, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_rss_write_reg(struct osi_core_priv_data *osi_core, - unsigned int idx, - unsigned int value, - unsigned int is_key) +static nve32_t mgbe_rss_write_reg(struct osi_core_priv_data *osi_core, + nveu32_t idx, + nveu32_t value, + nveu32_t is_key) { - unsigned char *addr = (unsigned char *)osi_core->base; - unsigned int retry = 100; - unsigned int ctrl = 0; - unsigned int count = 0; - int cond = 1; + nveu8_t *addr = (nveu8_t *)osi_core->base; + nveu32_t retry = 100; + nveu32_t ctrl = 0; + nveu32_t count = 0; + nve32_t cond = 1; /* data into RSS Lookup Table or RSS Hash Key */ osi_writela(osi_core, value, addr + MGBE_MAC_RSS_DATA); @@ -2416,7 +1352,7 @@ static int mgbe_rss_write_reg(struct osi_core_priv_data *osi_core, /* poll for write operation to complete */ while (cond == 1) { if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to update RSS Hash key or table\n", 0ULL); return -1; @@ -2447,12 +1383,12 @@ static int mgbe_rss_write_reg(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_config_rss(struct osi_core_priv_data *osi_core) +static nve32_t mgbe_config_rss(struct osi_core_priv_data *osi_core) { - unsigned char *addr = (unsigned char *)osi_core->base; - unsigned int value = 0; - unsigned int i = 0, j = 0; - int ret = 0; + nveu8_t *addr = (nveu8_t *)osi_core->base; + nveu32_t value = 0; + nveu32_t i = 0, j = 0; + nve32_t ret = 0; if (osi_core->rss.enable == OSI_DISABLE) { /* RSS not supported */ @@ -2466,10 +1402,10 @@ static int mgbe_config_rss(struct osi_core_priv_data *osi_core) /* Program the hash key */ for (i = 0; i < OSI_RSS_HASH_KEY_SIZE; i += 4U) { - value = ((unsigned int)osi_core->rss.key[i] | - (unsigned int)osi_core->rss.key[i + 1U] << 8U | - (unsigned int)osi_core->rss.key[i + 2U] << 16U | - (unsigned int)osi_core->rss.key[i + 3U] << 24U); + value = ((nveu32_t)osi_core->rss.key[i] | + ((nveu32_t)osi_core->rss.key[i + 1U] << 8U) | + ((nveu32_t)osi_core->rss.key[i + 2U] << 16U) | + ((nveu32_t)osi_core->rss.key[i + 3U] << 24U)); ret = mgbe_rss_write_reg(osi_core, j, value, OSI_ENABLE); if (ret < 0) { return ret; @@ -2506,10 +1442,10 @@ static int mgbe_config_rss(struct osi_core_priv_data *osi_core) * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_config_flow_control(struct osi_core_priv_data *const osi_core, - const nveu32_t flw_ctrl) +static nve32_t mgbe_config_flow_control(struct osi_core_priv_data *const osi_core, + const nveu32_t flw_ctrl) { - unsigned int val; + nveu32_t val; void *addr = osi_core->base; /* return on invalid argument */ @@ -2520,7 +1456,7 @@ static int mgbe_config_flow_control(struct osi_core_priv_data *const osi_core, /* Configure MAC Tx Flow control */ /* Read MAC Tx Flow control Register of Q0 */ val = osi_readla(osi_core, - (unsigned char *)addr + MGBE_MAC_QX_TX_FLW_CTRL(0U)); + (nveu8_t *)addr + MGBE_MAC_QX_TX_FLW_CTRL(0U)); /* flw_ctrl BIT0: 1 is for tx flow ctrl enable * flw_ctrl BIT0: 0 is for tx flow ctrl disable @@ -2538,12 +1474,12 @@ static int mgbe_config_flow_control(struct osi_core_priv_data *const osi_core, /* Write to MAC Tx Flow control Register of Q0 */ osi_writela(osi_core, val, - (unsigned char *)addr + MGBE_MAC_QX_TX_FLW_CTRL(0U)); + (nveu8_t *)addr + MGBE_MAC_QX_TX_FLW_CTRL(0U)); /* Configure MAC Rx Flow control*/ /* Read MAC Rx Flow control Register */ val = osi_readla(osi_core, - (unsigned char *)addr + MGBE_MAC_RX_FLW_CTRL); + (nveu8_t *)addr + MGBE_MAC_RX_FLW_CTRL); /* flw_ctrl BIT1: 1 is for rx flow ctrl enable * flw_ctrl BIT1: 0 is for rx flow ctrl disable @@ -2558,10 +1494,11 @@ static int mgbe_config_flow_control(struct osi_core_priv_data *const osi_core, /* Write to MAC Rx Flow control Register */ osi_writela(osi_core, val, - (unsigned char *)addr + MGBE_MAC_RX_FLW_CTRL); + (nveu8_t *)addr + MGBE_MAC_RX_FLW_CTRL); return 0; } +#endif /* !OSI_STRIPPED_LIB */ #ifdef HSI_SUPPORT /** @@ -2575,28 +1512,28 @@ static int mgbe_config_flow_control(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure */ -static int mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, - const nveu32_t enable) +static nve32_t mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, + const nveu32_t enable) { 
nveu32_t value = 0U; - int ret = 0; + nve32_t ret = 0; + const nveu16_t osi_hsi_reporter_id[] = { + OSI_HSI_MGBE0_REPORTER_ID, + OSI_HSI_MGBE1_REPORTER_ID, + OSI_HSI_MGBE2_REPORTER_ID, + OSI_HSI_MGBE3_REPORTER_ID, + }; if (enable == OSI_ENABLE) { osi_core->hsi.enabled = OSI_ENABLE; - osi_core->hsi.reporter_id = hsi_err_code[osi_core->instance_id][REPORTER_IDX]; + osi_core->hsi.reporter_id = osi_hsi_reporter_id[osi_core->instance_id]; - /* T23X-MGBE_HSIv2-10 Enable PCS ECC */ - value = (EN_ERR_IND | FEC_EN); - ret = xpcs_write_safety(osi_core, XPCS_BASE_PMA_MMD_SR_PMA_KR_FEC_CTRL, value); - if (ret != 0) { - return ret; - } /* T23X-MGBE_HSIv2-12:Initialization of Transaction Timeout in PCS */ /* T23X-MGBE_HSIv2-11:Initialization of Watchdog Timer */ value = (0xCCU << XPCS_SFTY_1US_MULT_SHIFT) & XPCS_SFTY_1US_MULT_MASK; ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_SFTY_TMR_CTRL, value); if (ret != 0) { - return ret; + goto fail; } /* T23X-MGBE_HSIv2-1 Configure ECC */ value = osi_readla(osi_core, @@ -2612,15 +1549,15 @@ static int mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, /* T23X-MGBE_HSIv2-5: Enabling and Initialization of Transaction Timeout */ value = (0x198U << MGBE_TMR_SHIFT) & MGBE_TMR_MASK; - value |= (0x0U << MGBE_CTMR_SHIFT) & MGBE_CTMR_MASK; - value |= (0x2U << MGBE_LTMRMD_SHIFT) & MGBE_LTMRMD_MASK; - value |= (0x1U << MGBE_NTMRMD_SHIFT) & MGBE_NTMRMD_MASK; + value |= ((nveu32_t)0x0U << MGBE_CTMR_SHIFT) & MGBE_CTMR_MASK; + value |= ((nveu32_t)0x2U << MGBE_LTMRMD_SHIFT) & MGBE_LTMRMD_MASK; + value |= ((nveu32_t)0x2U << MGBE_NTMRMD_SHIFT) & MGBE_NTMRMD_MASK; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_DWCXG_CORE_MAC_FSM_ACT_TIMER); /* T23X-MGBE_HSIv2-3: Enabling and Initialization of Watchdog Timer */ /* T23X-MGBE_HSIv2-4: Enabling of Consistency Monitor for XGMAC FSM State */ - // TODO: enable MGBE_TMOUTEN. + /* TODO enable MGBE_TMOUTEN. 
Bug 3584387 */ value = MGBE_PRTYEN; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MAC_FSM_CONTROL); @@ -2675,15 +1612,10 @@ static int mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, } else { osi_core->hsi.enabled = OSI_DISABLE; - /* T23X-MGBE_HSIv2-10 Disable PCS ECC */ - ret = xpcs_write_safety(osi_core, XPCS_BASE_PMA_MMD_SR_PMA_KR_FEC_CTRL, 0); - if (ret != 0) { - return ret; - } /* T23X-MGBE_HSIv2-11:Deinitialization of Watchdog Timer */ ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_SFTY_TMR_CTRL, 0); if (ret != 0) { - return ret; + goto fail; } /* T23X-MGBE_HSIv2-1 Disable ECC */ value = osi_readla(osi_core, @@ -2742,6 +1674,56 @@ static int mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, osi_writela(osi_core, value, (nveu8_t *)osi_core->xpcs_base + XPCS_WRAP_INTERRUPT_CONTROL); } +fail: + return ret; +} + +/** + * @brief mgbe_hsi_inject_err - Inject error + * + * Algorithm: Use error injection method to induce error + * + * @param[in] osi_core: OSI core private data structure. 
+ * @param[in] error_code: HSI Error code + * + * @retval 0 on success + * @retval -1 on failure + */ +static nve32_t mgbe_hsi_inject_err(struct osi_core_priv_data *const osi_core, + const nveu32_t error_code) +{ + const nveu32_t val_ce = (MGBE_MTL_DEBUG_CONTROL_FDBGEN | + MGBE_MTL_DEBUG_CONTROL_DBGMOD | + MGBE_MTL_DEBUG_CONTROL_FIFORDEN | + MGBE_MTL_DEBUG_CONTROL_EIEE | + MGBE_MTL_DEBUG_CONTROL_EIEC); + + const nveu32_t val_ue = (MGBE_MTL_DEBUG_CONTROL_FDBGEN | + MGBE_MTL_DEBUG_CONTROL_DBGMOD | + MGBE_MTL_DEBUG_CONTROL_FIFORDEN | + MGBE_MTL_DEBUG_CONTROL_EIEE); + nve32_t ret = 0; + + switch (error_code) { + case OSI_HSI_MGBE0_CE_CODE: + case OSI_HSI_MGBE1_CE_CODE: + case OSI_HSI_MGBE2_CE_CODE: + case OSI_HSI_MGBE3_CE_CODE: + osi_writela(osi_core, val_ce, (nveu8_t *)osi_core->base + + MGBE_MTL_DEBUG_CONTROL); + break; + case OSI_HSI_MGBE0_UE_CODE: + case OSI_HSI_MGBE1_UE_CODE: + case OSI_HSI_MGBE2_UE_CODE: + case OSI_HSI_MGBE3_UE_CODE: + osi_writela(osi_core, val_ue, (nveu8_t *)osi_core->base + + MGBE_MTL_DEBUG_CONTROL); + break; + default: + ret = hsi_common_error_inject(osi_core, error_code); + break; + } + return ret; } #endif @@ -2764,9 +1746,9 @@ static int mgbe_hsi_configure(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) +static nve32_t mgbe_configure_mac(struct osi_core_priv_data *osi_core) { - unsigned int value = 0U, max_queue = 0U, i = 0U; + nveu32_t value = 0U, max_queue = 0U, i = 0U; /* TODO: Need to check if we need to enable anything in Tx configuration * value = osi_readla(osi_core, @@ -2780,14 +1762,14 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) value |= MGBE_MAC_RMCR_ACS | MGBE_MAC_RMCR_CST | MGBE_MAC_RMCR_IPC; /* Jumbo Packet Enable */ - if (osi_core->mtu > OSI_DFLT_MTU_SIZE && - osi_core->mtu <= OSI_MTU_SIZE_9000) { + if ((osi_core->mtu > OSI_DFLT_MTU_SIZE) && + (osi_core->mtu <= OSI_MTU_SIZE_9000)) { value |= MGBE_MAC_RMCR_JE; } else if (osi_core->mtu > OSI_MTU_SIZE_9000){ /* if MTU greater 9K use GPSLCE */ value |= MGBE_MAC_RMCR_GPSLCE | MGBE_MAC_RMCR_WD; value &= ~MGBE_MAC_RMCR_GPSL_MSK; - value |= ((OSI_MAX_MTU_SIZE << 16) & MGBE_MAC_RMCR_GPSL_MSK); + value |= ((((nveu32_t)OSI_MAX_MTU_SIZE) << 16U) & MGBE_MAC_RMCR_GPSL_MSK); } else { value &= ~MGBE_MAC_RMCR_JE; value &= ~MGBE_MAC_RMCR_GPSLCE; @@ -2795,10 +1777,10 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) } osi_writela(osi_core, value, - (unsigned char *)osi_core->base + MGBE_MAC_RMCR); + (nveu8_t *)osi_core->base + MGBE_MAC_RMCR); value = osi_readla(osi_core, - (unsigned char *)osi_core->base + MGBE_MAC_TMCR); + (nveu8_t *)osi_core->base + MGBE_MAC_TMCR); /* DDIC bit set is needed to improve MACSEC Tput */ value |= MGBE_MAC_TMCR_DDIC; /* Jabber Disable */ @@ -2806,11 +1788,11 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) value |= MGBE_MAC_TMCR_JD; } osi_writela(osi_core, value, - (unsigned char *)osi_core->base + MGBE_MAC_TMCR); + (nveu8_t *)osi_core->base + MGBE_MAC_TMCR); /* Enable Multicast and Broadcast Queue */ value = osi_readla(osi_core, - (unsigned char *)osi_core->base + MGBE_MAC_RQC1R); + (nveu8_t *)osi_core->base + MGBE_MAC_RQC1R); value |= 
MGBE_MAC_RQC1R_MCBCQEN; /* Set MCBCQ to highest enabled RX queue index */ for (i = 0; i < osi_core->num_mtl_queues; i++) { @@ -2823,7 +1805,7 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) value &= ~(MGBE_MAC_RQC1R_MCBCQ); value |= (max_queue << MGBE_MAC_RQC1R_MCBCQ_SHIFT); osi_writela(osi_core, value, - (unsigned char *)osi_core->base + MGBE_MAC_RQC1R); + (nveu8_t *)osi_core->base + MGBE_MAC_RQC1R); /* Disable all MMC nve32_terrupts */ /* Disable all MMC Tx nve32_terrupts */ @@ -2847,19 +1829,22 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) /* RGSMIIIM - RGMII/SMII interrupt and TSIE Enable */ /* TXESIE - Transmit Error Status Interrupt Enable */ /* TODO: LPI need to be enabled during EEE implementation */ - value |= (MGBE_IMR_RGSMIIIE | MGBE_IMR_TSIE | MGBE_IMR_TXESIE); +#ifndef OSI_STRIPPED_LIB + value |= (MGBE_IMR_TXESIE); +#endif + value |= (MGBE_IMR_RGSMIIIE | MGBE_IMR_TSIE); osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MAC_IER); /* Enable common interrupt at wrapper level */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_WRAP_COMMON_INTR_ENABLE); value |= MGBE_MAC_SBD_INTR; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_WRAP_COMMON_INTR_ENABLE); /* Enable VLAN configuration */ value = osi_readla(osi_core, - (unsigned char *)osi_core->base + MGBE_MAC_VLAN_TR); + (nveu8_t *)osi_core->base + MGBE_MAC_VLAN_TR); /* Enable VLAN Tag in RX Status * Disable double VLAN Tag processing on TX and RX */ @@ -2869,17 +1854,18 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) } value |= MGBE_MAC_VLANTR_EVLRXS | MGBE_MAC_VLANTR_DOVLTC; osi_writela(osi_core, value, - (unsigned char *)osi_core->base + MGBE_MAC_VLAN_TR); + (nveu8_t *)osi_core->base + MGBE_MAC_VLAN_TR); value = osi_readla(osi_core, - (unsigned char *)osi_core->base + 
MGBE_MAC_VLANTIR); + (nveu8_t *)osi_core->base + MGBE_MAC_VLANTIR); /* Enable VLAN tagging through context descriptor */ value |= MGBE_MAC_VLANTIR_VLTI; /* insert/replace C_VLAN in 13th & 14th bytes of transmitted frames */ value &= ~MGBE_MAC_VLANTIRR_CSVL; osi_writela(osi_core, value, - (unsigned char *)osi_core->base + MGBE_MAC_VLANTIR); + (nveu8_t *)osi_core->base + MGBE_MAC_VLANTIR); +#ifndef OSI_STRIPPED_LIB /* Configure default flow control settings */ if (osi_core->pause_frames == OSI_PAUSE_FRAMES_ENABLE) { osi_core->flow_ctrl = (OSI_FLOW_CTRL_TX | OSI_FLOW_CTRL_RX); @@ -2893,7 +1879,10 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) /* TODO: USP (user Priority) to RxQ Mapping */ /* RSS cofiguration */ - return mgbe_config_rss(osi_core); + mgbe_config_rss(osi_core); +#endif /* !OSI_STRIPPED_LIB */ + + return 0; } /** @@ -2909,8 +1898,7 @@ static int mgbe_configure_mac(struct osi_core_priv_data *osi_core) * * @note MAC has to be out of reset. */ -static void mgbe_configure_dma(struct osi_core_priv_data *osi_core, - nveu32_t pre_si) +static void mgbe_configure_dma(struct osi_core_priv_data *osi_core) { nveu32_t value = 0; @@ -2931,308 +1919,18 @@ static void mgbe_configure_dma(struct osi_core_priv_data *osi_core, /* Configure TDPS to 5 */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_DMA_TX_EDMA_CTRL); - if (pre_si == OSI_ENABLE) { - /* For Pre silicon TDPS Value is 3 */ - value |= MGBE_DMA_TX_EDMA_CTRL_TDPS_PRESI; - } else { - value |= MGBE_DMA_TX_EDMA_CTRL_TDPS; - } + value |= MGBE_DMA_TX_EDMA_CTRL_TDPS; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_DMA_TX_EDMA_CTRL); /* Configure RDPS to 5 */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_DMA_RX_EDMA_CTRL); - if (pre_si == OSI_ENABLE) { - /* For Pre silicon RDPS Value is 3 */ - value |= MGBE_DMA_RX_EDMA_CTRL_RDPS_PRESI; - } else { - value |= MGBE_DMA_RX_EDMA_CTRL_RDPS; - } + value |= MGBE_DMA_RX_EDMA_CTRL_RDPS; osi_writela(osi_core, value, 
(nveu8_t *)osi_core->base + MGBE_DMA_RX_EDMA_CTRL); } -/** - * @brief Initialize the osi_core->backup_config. - * - * Algorithm: Populate the list of core registers to be saved during suspend. - * Fill the address of each register in structure. - * - * @param[in] osi_core: OSI core private data structure. - * - * @retval none - */ -static void mgbe_core_backup_init(struct osi_core_priv_data *const osi_core) -{ - struct core_backup *config = &osi_core->backup_config; - unsigned char *base = (unsigned char *)osi_core->base; - unsigned int i; - - /* MAC registers backup */ - config->reg_addr[MGBE_MAC_TMCR_BAK_IDX] = base + MGBE_MAC_TMCR; - config->reg_addr[MGBE_MAC_RMCR_BAK_IDX] = base + MGBE_MAC_RMCR; - config->reg_addr[MGBE_MAC_PFR_BAK_IDX] = base + MGBE_MAC_PFR; - config->reg_addr[MGBE_MAC_VLAN_TAG_BAK_IDX] = base + - MGBE_MAC_VLAN_TR; - config->reg_addr[MGBE_MAC_VLANTIR_BAK_IDX] = base + MGBE_MAC_VLANTIR; - config->reg_addr[MGBE_MAC_RX_FLW_CTRL_BAK_IDX] = base + - MGBE_MAC_RX_FLW_CTRL; - config->reg_addr[MGBE_MAC_RQC0R_BAK_IDX] = base + MGBE_MAC_RQC0R; - config->reg_addr[MGBE_MAC_RQC1R_BAK_IDX] = base + MGBE_MAC_RQC1R; - config->reg_addr[MGBE_MAC_RQC2R_BAK_IDX] = base + MGBE_MAC_RQC2R; - config->reg_addr[MGBE_MAC_ISR_BAK_IDX] = base + MGBE_MAC_ISR; - config->reg_addr[MGBE_MAC_IER_BAK_IDX] = base + MGBE_MAC_IER; - config->reg_addr[MGBE_MAC_PMTCSR_BAK_IDX] = base + MGBE_MAC_PMTCSR; - config->reg_addr[MGBE_MAC_LPI_CSR_BAK_IDX] = base + MGBE_MAC_LPI_CSR; - config->reg_addr[MGBE_MAC_LPI_TIMER_CTRL_BAK_IDX] = base + - MGBE_MAC_LPI_TIMER_CTRL; - config->reg_addr[MGBE_MAC_LPI_EN_TIMER_BAK_IDX] = base + - MGBE_MAC_LPI_EN_TIMER; - config->reg_addr[MGBE_MAC_TCR_BAK_IDX] = base + MGBE_MAC_TCR; - config->reg_addr[MGBE_MAC_SSIR_BAK_IDX] = base + MGBE_MAC_SSIR; - config->reg_addr[MGBE_MAC_STSR_BAK_IDX] = base + MGBE_MAC_STSR; - config->reg_addr[MGBE_MAC_STNSR_BAK_IDX] = base + MGBE_MAC_STNSR; - config->reg_addr[MGBE_MAC_STSUR_BAK_IDX] = base + MGBE_MAC_STSUR; - 
config->reg_addr[MGBE_MAC_STNSUR_BAK_IDX] = base + MGBE_MAC_STNSUR; - config->reg_addr[MGBE_MAC_TAR_BAK_IDX] = base + MGBE_MAC_TAR; - config->reg_addr[MGBE_DMA_BMR_BAK_IDX] = base + MGBE_DMA_MODE; - config->reg_addr[MGBE_DMA_SBUS_BAK_IDX] = base + MGBE_DMA_SBUS; - config->reg_addr[MGBE_DMA_ISR_BAK_IDX] = base + MGBE_DMA_ISR; - config->reg_addr[MGBE_MTL_OP_MODE_BAK_IDX] = base + MGBE_MTL_OP_MODE; - config->reg_addr[MGBE_MTL_RXQ_DMA_MAP0_BAK_IDX] = base + - MGBE_MTL_RXQ_DMA_MAP0; - - for (i = 0; i < MGBE_MAX_HTR_REGS; i++) { - config->reg_addr[MGBE_MAC_HTR_REG_BAK_IDX(i)] = base + - MGBE_MAC_HTR_REG(i); - } - for (i = 0; i < OSI_MGBE_MAX_NUM_QUEUES; i++) { - config->reg_addr[MGBE_MAC_QX_TX_FLW_CTRL_BAK_IDX(i)] = base + - MGBE_MAC_QX_TX_FLW_CTRL(i); - } - for (i = 0; i < OSI_MGBE_MAX_MAC_ADDRESS_FILTER; i++) { - config->reg_addr[MGBE_MAC_ADDRH_BAK_IDX(i)] = base + - MGBE_MAC_ADDRH(i); - config->reg_addr[MGBE_MAC_ADDRL_BAK_IDX(i)] = base + - MGBE_MAC_ADDRL(i); - } - for (i = 0; i < OSI_MGBE_MAX_NUM_QUEUES; i++) { - config->reg_addr[MGBE_MTL_CHX_TX_OP_MODE_BAK_IDX(i)] = base + - MGBE_MTL_CHX_TX_OP_MODE(i); - config->reg_addr[MGBE_MTL_CHX_RX_OP_MODE_BAK_IDX(i)] = base + - MGBE_MTL_CHX_RX_OP_MODE(i); - } - for (i = 0; i < OSI_MAX_TC_NUM; i++) { - config->reg_addr[MGBE_MTL_TXQ_ETS_CR_BAK_IDX(i)] = base + - MGBE_MTL_TCQ_ETS_CR(i); - config->reg_addr[MGBE_MTL_TXQ_QW_BAK_IDX(i)] = base + - MGBE_MTL_TCQ_QW(i); - config->reg_addr[MGBE_MTL_TXQ_ETS_SSCR_BAK_IDX(i)] = base + - MGBE_MTL_TCQ_ETS_SSCR(i); - config->reg_addr[MGBE_MTL_TXQ_ETS_HCR_BAK_IDX(i)] = base + - MGBE_MTL_TCQ_ETS_HCR(i); - config->reg_addr[MGBE_MTL_TXQ_ETS_LCR_BAK_IDX(i)] = base + - MGBE_MTL_TCQ_ETS_LCR(i); - } - - /* TODO: Add wrapper register backup */ -} - -/** - * @brief mgbe_enable_mtl_interrupts - Enable MTL interrupts - * - * Algorithm: enable MTL interrupts for EST - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC should be init and started. 
see osi_start_mac() - */ -static inline void mgbe_enable_mtl_interrupts( - struct osi_core_priv_data *osi_core) -{ - unsigned int mtl_est_ir = OSI_DISABLE; - - mtl_est_ir = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MTL_EST_ITRE); - /* enable only MTL interrupt realted to - * Constant Gate Control Error - * Head-Of-Line Blocking due to Scheduling - * Head-Of-Line Blocking due to Frame Size - * BTR Error - * Switch to S/W owned list Complete - */ - mtl_est_ir |= (MGBE_MTL_EST_ITRE_CGCE | MGBE_MTL_EST_ITRE_IEHS | - MGBE_MTL_EST_ITRE_IEHF | MGBE_MTL_EST_ITRE_IEBE | - MGBE_MTL_EST_ITRE_IECC); - osi_writela(osi_core, mtl_est_ir, - (unsigned char *)osi_core->base + MGBE_MTL_EST_ITRE); -} - -/** - * @brief mgbe_enable_fpe_interrupts - Enable MTL interrupts - * - * Algorithm: enable FPE interrupts - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC should be init and started. see osi_start_mac() - */ -static inline void mgbe_enable_fpe_interrupts( - struct osi_core_priv_data *osi_core) -{ - unsigned int value = OSI_DISABLE; - - /* Read MAC IER Register and enable Frame Preemption Interrupt - * Enable */ - value = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MAC_IER); - value |= MGBE_IMR_FPEIE; - osi_writela(osi_core, value, (unsigned char *) - osi_core->base + MGBE_MAC_IER); -} - -/** - * @brief mgbe_save_gcl_params - save GCL configs in local core structure - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC should be init and started. 
see osi_start_mac() - */ -static inline void mgbe_save_gcl_params(struct osi_core_priv_data *osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; - unsigned int gcl_widhth[4] = {0, OSI_MAX_24BITS, OSI_MAX_28BITS, - OSI_MAX_32BITS}; - nveu32_t gcl_ti_mask[4] = {0, OSI_MASK_16BITS, OSI_MASK_20BITS, - OSI_MASK_24BITS}; - unsigned int gcl_depthth[6] = {0, OSI_GCL_SIZE_64, OSI_GCL_SIZE_128, - OSI_GCL_SIZE_256, OSI_GCL_SIZE_512, - OSI_GCL_SIZE_1024}; - - if (osi_core->hw_feature->gcl_width == 0 || - osi_core->hw_feature->gcl_width > 3) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Wrong HW feature GCL width\n", - (unsigned long long)osi_core->hw_feature->gcl_width); - } else { - l_core->gcl_width_val = - gcl_widhth[osi_core->hw_feature->gcl_width]; - l_core->ti_mask = gcl_ti_mask[osi_core->hw_feature->gcl_width]; - } - - if (osi_core->hw_feature->gcl_depth == 0 || - osi_core->hw_feature->gcl_depth > 5) { - /* Do Nothing */ - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "Wrong HW feature GCL depth\n", - (unsigned long long)osi_core->hw_feature->gcl_depth); - } else { - l_core->gcl_dep = gcl_depthth[osi_core->hw_feature->gcl_depth]; - } -} - -/** - * @brief mgbe_tsn_init - initialize TSN feature - * - * Algorithm: - * 1) If hardware support EST, - * a) Set default EST configuration - * b) Set enable interrupts - * 2) If hardware supports FPE - * a) Set default FPE configuration - * b) enable interrupts - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] est_sel: EST HW support present or not - * @param[in] fpe_sel: FPE HW support present or not - * - * @note MAC should be init and started. 
see osi_start_mac() - */ -static void mgbe_tsn_init(struct osi_core_priv_data *osi_core, - unsigned int est_sel, unsigned int fpe_sel) -{ - unsigned int val = 0x0; - unsigned int temp = 0U; - - if (est_sel == OSI_ENABLE) { - mgbe_save_gcl_params(osi_core); - val = osi_readla(osi_core, (unsigned char *)osi_core->base + - MGBE_MTL_EST_CONTROL); - - /* - * PTOV PTP clock period * 6 - * dual-port RAM based asynchronous FIFO controllers or - * Single-port RAM based synchronous FIFO controllers - * CTOV 96 x Tx clock period - * : - * : - * set other default value - */ - val &= ~MGBE_MTL_EST_CONTROL_PTOV; - if (osi_core->pre_si == OSI_ENABLE) { - /* 6*1/(78.6 MHz) in ns*/ - temp = (6U * 13U); - } else { - temp = MGBE_MTL_EST_PTOV_RECOMMEND; - } - temp = temp << MGBE_MTL_EST_CONTROL_PTOV_SHIFT; - val |= temp; - - val &= ~MGBE_MTL_EST_CONTROL_CTOV; - temp = MGBE_MTL_EST_CTOV_RECOMMEND; - temp = temp << MGBE_MTL_EST_CONTROL_CTOV_SHIFT; - val |= temp; - - /*Loop Count to report Scheduling Error*/ - val &= ~MGBE_MTL_EST_CONTROL_LCSE; - val |= MGBE_MTL_EST_CONTROL_LCSE_VAL; - - val &= ~MGBE_MTL_EST_CONTROL_DDBF; - val |= MGBE_MTL_EST_CONTROL_DDBF; - osi_writela(osi_core, val, (unsigned char *)osi_core->base + - MGBE_MTL_EST_CONTROL); - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_MTL_EST_OVERHEAD); - val &= ~MGBE_MTL_EST_OVERHEAD_OVHD; - /* As per hardware programming info */ - val |= MGBE_MTL_EST_OVERHEAD_RECOMMEND; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - MGBE_MTL_EST_OVERHEAD); - - mgbe_enable_mtl_interrupts(osi_core); - } - - if (fpe_sel == OSI_ENABLE) { - val = osi_readla(osi_core, (unsigned char *)osi_core->base + - MGBE_MAC_RQC1R); - val &= ~MGBE_MAC_RQC1R_RQ; - temp = osi_core->residual_queue; - temp = temp << MGBE_MAC_RQC1R_RQ_SHIFT; - temp = (temp & MGBE_MAC_RQC1R_RQ); - val |= temp; - osi_writela(osi_core, val, (unsigned char *)osi_core->base + - MGBE_MAC_RQC1R); - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - 
MGBE_MAC_RQC4R); - val &= ~MGBE_MAC_RQC4R_PMCBCQ; - temp = osi_core->residual_queue; - temp = temp << MGBE_MAC_RQC4R_PMCBCQ_SHIFT; - temp = (temp & MGBE_MAC_RQC4R_PMCBCQ); - val |= temp; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - MGBE_MAC_RQC4R); - - mgbe_enable_fpe_interrupts(osi_core); - } - - /* CBS setting for TC or TXQ for default configuration - user application should use IOCTL to set CBS as per requirement - */ -} - /** * @brief Map DMA channels to a specific VM IRQ. * @@ -3246,7 +1944,9 @@ static void mgbe_tsn_init(struct osi_core_priv_data *osi_core, */ static nve32_t mgbe_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) { +#ifndef OSI_STRIPPED_LIB nveu32_t sid[4] = { MGBE0_SID, MGBE1_SID, MGBE2_SID, MGBE3_SID }; +#endif struct osi_vm_irq_data *irq_data; nveu32_t i, j; nveu32_t chan; @@ -3269,6 +1969,7 @@ static nve32_t mgbe_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) (nveu8_t *)osi_core->base + MGBE_VIRTUAL_APB_ERR_CTRL); } +#ifndef OSI_STRIPPED_LIB if ((osi_core->use_virtualization == OSI_DISABLE) && (osi_core->hv_base != OSI_NULL)) { if (osi_core->instance_id > 3U) { @@ -3290,7 +1991,7 @@ static nve32_t mgbe_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) (nveu8_t *)osi_core->hv_base + MGBE_WRAP_AXI_ASID2_CTRL); } - +#endif return 0; } @@ -3302,8 +2003,6 @@ static nve32_t mgbe_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) * common DMA registers. * * @param[in] osi_core: OSI core private data structure. - * @param[in] tx_fifo_size: MTL TX FIFO size - * @param[in] rx_fifo_size: MTL RX FIFO size * * @note 1) MAC should be out of reset. See osi_poll_for_swr() for details. * 2) osi_core->base needs to be filled based on ioremap. @@ -3313,17 +2012,11 @@ static nve32_t mgbe_dma_chan_to_vmirq_map(struct osi_core_priv_data *osi_core) * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t mgbe_core_init(struct osi_core_priv_data *osi_core, - nveu32_t tx_fifo_size, - nveu32_t rx_fifo_size) +static nve32_t mgbe_core_init(struct osi_core_priv_data *const osi_core) { nve32_t ret = 0; nveu32_t qinx = 0; nveu32_t value = 0; - nveu32_t tx_fifo = 0; - nveu32_t rx_fifo = 0; - - mgbe_core_backup_init(osi_core); /* reset mmc counters */ osi_writela(osi_core, MGBE_MMC_CNTRL_CNTRST, (nveu8_t *)osi_core->base + @@ -3334,21 +2027,21 @@ static nve32_t mgbe_core_init(struct osi_core_priv_data *osi_core, MGBE_MTL_RXQ_DMA_MAP0); value |= MGBE_RXQ_TO_DMA_CHAN_MAP0; value |= MGBE_RXQ_TO_DMA_MAP_DDMACH; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_DMA_MAP0); value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_DMA_MAP1); value |= MGBE_RXQ_TO_DMA_CHAN_MAP1; value |= MGBE_RXQ_TO_DMA_MAP_DDMACH; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_DMA_MAP1); value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_DMA_MAP2); value |= MGBE_RXQ_TO_DMA_CHAN_MAP2; value |= MGBE_RXQ_TO_DMA_MAP_DDMACH; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_RXQ_DMA_MAP2); /* Enable XDCS in MAC_Extended_Configuration */ @@ -3358,50 +2051,41 @@ static nve32_t mgbe_core_init(struct osi_core_priv_data *osi_core, osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MAC_EXT_CNF); - if (osi_core->pre_si == OSI_ENABLE) { - /* For pre silicon Tx and Rx Queue sizes are 64KB */ - tx_fifo_size = MGBE_TX_FIFO_SIZE_64KB; - rx_fifo_size = MGBE_RX_FIFO_SIZE_64KB; - } else { - /* Actual HW RAM size for Tx is 128KB and Rx is 192KB */ - tx_fifo_size = MGBE_TX_FIFO_SIZE_128KB; - rx_fifo_size = MGBE_RX_FIFO_SIZE_192KB; - } - - /* Calculate value of Transmit queue fifo size to be programmed */ - 
tx_fifo = mgbe_calculate_per_queue_fifo(tx_fifo_size, - osi_core->num_mtl_queues); - - /* Calculate value of Receive queue fifo size to be programmed */ - rx_fifo = mgbe_calculate_per_queue_fifo(rx_fifo_size, - osi_core->num_mtl_queues); - /* Configure MTL Queues */ /* TODO: Iterate over Number MTL queues need to be removed */ for (qinx = 0; qinx < osi_core->num_mtl_queues; qinx++) { - ret = mgbe_configure_mtl_queue(osi_core->mtl_queues[qinx], - osi_core, tx_fifo, rx_fifo); + ret = mgbe_configure_mtl_queue(osi_core, osi_core->mtl_queues[qinx]); if (ret < 0) { - return ret; + goto fail; + } + /* Enable by default to configure forward error packets. + * Since this is a local function this will always return success, + * so no need to check for return value + */ + ret = hw_config_fw_err_pkts(osi_core, osi_core->mtl_queues[qinx], OSI_ENABLE); + if (ret < 0) { + goto fail; } } /* configure MGBE MAC HW */ ret = mgbe_configure_mac(osi_core); if (ret < 0) { - return ret; + goto fail; } /* configure MGBE DMA */ - mgbe_configure_dma(osi_core, osi_core->pre_si); + mgbe_configure_dma(osi_core); /* tsn initialization */ if (osi_core->hw_feature != OSI_NULL) { - mgbe_tsn_init(osi_core, osi_core->hw_feature->est_sel, - osi_core->hw_feature->fpe_sel); + hw_tsn_init(osi_core, osi_core->hw_feature->est_sel, + osi_core->hw_feature->fpe_sel); } - return mgbe_dma_chan_to_vmirq_map(osi_core); + ret = mgbe_dma_chan_to_vmirq_map(osi_core); +fail: + return ret; } /** @@ -3417,10 +2101,10 @@ static nve32_t mgbe_core_init(struct osi_core_priv_data *osi_core, */ static void mgbe_handle_mac_fpe_intrs(struct osi_core_priv_data *osi_core) { - unsigned int val = 0; + nveu32_t val = 0; /* interrupt bit clear on read as CSR_SW is reset */ - val = osi_readla(osi_core, (unsigned char *) + val = osi_readla(osi_core, (nveu8_t *) osi_core->base + MGBE_MAC_FPE_CTS); if ((val & MGBE_MAC_FPE_CTS_RVER) == MGBE_MAC_FPE_CTS_RVER) { @@ -3454,7 +2138,7 @@ static void mgbe_handle_mac_fpe_intrs(struct 
osi_core_priv_data *osi_core) val &= ~MGBE_MAC_FPE_CTS_EFPE; } - osi_writela(osi_core, val, (unsigned char *) + osi_writela(osi_core, val, (nveu8_t *) osi_core->base + MGBE_MAC_FPE_CTS); } @@ -3487,87 +2171,115 @@ static inline nveu32_t get_free_ts_idx(struct core_local *l_core) * MAC nve32_terrupts which includes speed, mode detection. * * @param[in] osi_core: OSI core private data structure. - * @param[in] dma_isr: DMA ISR register read value. * * @note MAC nve32_terrupts need to be enabled */ -static void mgbe_handle_mac_intrs(struct osi_core_priv_data *osi_core, - nveu32_t dma_isr) +static void mgbe_handle_mac_intrs(struct osi_core_priv_data *osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; nveu32_t mac_isr = 0; nveu32_t mac_ier = 0; nveu32_t tx_errors = 0; + nveu8_t *base = (nveu8_t *)osi_core->base; +#ifdef HSI_SUPPORT + nveu64_t tx_frame_err = 0; +#endif - mac_isr = osi_readla(osi_core, - (unsigned char *)osi_core->base + MGBE_MAC_ISR); - /* Handle MAC interrupts */ - if ((dma_isr & MGBE_DMA_ISR_MACIS) != MGBE_DMA_ISR_MACIS) { - return; + mac_isr = osi_readla(osi_core, base + MGBE_MAC_ISR); + + /* Check for Link status change interrupt */ + if ((mac_isr & MGBE_MAC_ISR_LSI) == OSI_ENABLE) { + /* For Local fault need to stop network data and restart the LANE bringup */ + if ((mac_isr & MGBE_MAC_ISR_LS_MASK) == MGBE_MAC_ISR_LS_LOCAL_FAULT) { + osi_core->osd_ops.restart_lane_bringup(osi_core->osd, OSI_DISABLE); + } else if ((mac_isr & MGBE_MAC_ISR_LS_MASK) == MGBE_MAC_ISR_LS_LINK_OK) { + osi_core->osd_ops.restart_lane_bringup(osi_core->osd, OSI_ENABLE); + } else { + /* Do Nothing */ + } } - mac_ier = osi_readla(osi_core, - (unsigned char *)osi_core->base + MGBE_MAC_IER); + mac_ier = osi_readla(osi_core, base + MGBE_MAC_IER); if (((mac_isr & MGBE_MAC_IMR_FPEIS) == MGBE_MAC_IMR_FPEIS) && ((mac_ier & MGBE_IMR_FPEIE) == MGBE_IMR_FPEIE)) { mgbe_handle_mac_fpe_intrs(osi_core); 
- mac_isr &= ~MGBE_MAC_IMR_FPEIS; } + /* Check for any MAC Transmit Error Status Interrupt */ if ((mac_isr & MGBE_IMR_TXESIE) == MGBE_IMR_TXESIE) { /* Check for the type of Tx error by reading MAC_Rx_Tx_Status * register */ - tx_errors = osi_readl((unsigned char *)osi_core->base + - MGBE_MAC_RX_TX_STS); + tx_errors = osi_readl(base + MGBE_MAC_RX_TX_STS); +#ifndef OSI_STRIPPED_LIB if ((tx_errors & MGBE_MAC_TX_TJT) == MGBE_MAC_TX_TJT) { /* increment Tx Jabber timeout stats */ - osi_core->pkt_err_stats.mgbe_jabber_timeout_err = + osi_core->stats.mgbe_jabber_timeout_err = osi_update_stats_counter( - osi_core->pkt_err_stats.mgbe_jabber_timeout_err, - 1UL); + osi_core->stats.mgbe_jabber_timeout_err, + 1UL); } if ((tx_errors & MGBE_MAC_TX_IHE) == MGBE_MAC_TX_IHE) { /* IP Header Error */ - osi_core->pkt_err_stats.mgbe_ip_header_err = + osi_core->stats.mgbe_ip_header_err = osi_update_stats_counter( - osi_core->pkt_err_stats.mgbe_ip_header_err, - 1UL); + osi_core->stats.mgbe_ip_header_err, + 1UL); } if ((tx_errors & MGBE_MAC_TX_PCE) == MGBE_MAC_TX_PCE) { /* Payload Checksum error */ - osi_core->pkt_err_stats.mgbe_payload_cs_err = + osi_core->stats.mgbe_payload_cs_err = osi_update_stats_counter( - osi_core->pkt_err_stats.mgbe_payload_cs_err, - 1UL); + osi_core->stats.mgbe_payload_cs_err, + 1UL); } +#endif /* !OSI_STRIPPED_LIB */ + +#ifdef HSI_SUPPORT + tx_errors &= (MGBE_MAC_TX_TJT | MGBE_MAC_TX_IHE | MGBE_MAC_TX_PCE); + if (tx_errors != OSI_NONE) { + osi_core->hsi.tx_frame_err_count = + osi_update_stats_counter( + osi_core->hsi.tx_frame_err_count, 1UL); + tx_frame_err = osi_core->hsi.tx_frame_err_count / + osi_core->hsi.err_count_threshold; + if (osi_core->hsi.tx_frame_err_threshold < + tx_frame_err) { + osi_core->hsi.tx_frame_err_threshold = tx_frame_err; + osi_core->hsi.report_count_err[TX_FRAME_ERR_IDX] = OSI_ENABLE; + } + osi_core->hsi.err_code[TX_FRAME_ERR_IDX] = OSI_TX_FRAME_ERR; + osi_core->hsi.report_err = OSI_ENABLE; + } +#endif } - osi_writela(osi_core, mac_isr, - 
(unsigned char *)osi_core->base + MGBE_MAC_ISR); if ((mac_isr & MGBE_ISR_TSIS) == MGBE_ISR_TSIS) { struct osi_core_tx_ts *head = &l_core->tx_ts_head; if (__sync_fetch_and_add(&l_core->ts_lock, 1) == 1U) { /* mask return as initial value is returned always */ (void)__sync_fetch_and_sub(&l_core->ts_lock, 1); - osi_core->xstats.ts_lock_add_fail = - osi_update_stats_counter( - osi_core->xstats.ts_lock_add_fail, 1U); +#ifndef OSI_STRIPPED_LIB + osi_core->stats.ts_lock_add_fail = + osi_update_stats_counter(osi_core->stats.ts_lock_add_fail, 1U); +#endif /* !OSI_STRIPPED_LIB */ goto done; } /* TXTSC bit should get reset when all timestamp read */ - while (((osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_MAC_TSS) & - MGBE_MAC_TSS_TXTSC) == MGBE_MAC_TSS_TXTSC)) { + while (((osi_readla(osi_core, base + MGBE_MAC_TSS) & + MGBE_MAC_TSS_TXTSC) == MGBE_MAC_TSS_TXTSC)) { nveu32_t i = get_free_ts_idx(l_core); if (i == MAX_TX_TS_CNT) { struct osi_core_tx_ts *temp = l_core->tx_ts_head.next; - /* Remove oldest stale TS from list to make space for new TS */ + /* Remove oldest stale TS from list to make + * space for new TS + */ OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "Removing TS from queue pkt_id\n", temp->pkt_id); + "Removing TS from queue pkt_id\n", + temp->pkt_id); temp->in_use = OSI_DISABLE; /* remove temp node from the link */ @@ -3576,22 +2288,16 @@ static void mgbe_handle_mac_intrs(struct osi_core_priv_data *osi_core, i = get_free_ts_idx(l_core); if (i == MAX_TX_TS_CNT) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, - "TS queue is full\n", i); + "TS queue is full\n", i); break; } } - l_core->ts[i].nsec = osi_readla(osi_core, - (nveu8_t *)osi_core->base + - MGBE_MAC_TSNSSEC); + l_core->ts[i].nsec = osi_readla(osi_core, base + MGBE_MAC_TSNSSEC); l_core->ts[i].in_use = OSI_ENABLE; - l_core->ts[i].pkt_id = osi_readla(osi_core, - (nveu8_t *)osi_core->base + - MGBE_MAC_TSPKID); - l_core->ts[i].sec = osi_readla(osi_core, - (nveu8_t *)osi_core->base + - 
MGBE_MAC_TSSEC); + l_core->ts[i].pkt_id = osi_readla(osi_core, base + MGBE_MAC_TSPKID); + l_core->ts[i].sec = osi_readla(osi_core, base + MGBE_MAC_TSSEC); /* Add time stamp to end of list */ l_core->ts[i].next = head->prev->next; head->prev->next = &l_core->ts[i]; @@ -3603,13 +2309,10 @@ static void mgbe_handle_mac_intrs(struct osi_core_priv_data *osi_core, (void)__sync_fetch_and_sub(&l_core->ts_lock, 1); } done: - mac_isr &= ~MGBE_ISR_TSIS; - - osi_writela(osi_core, mac_isr, - (unsigned char *)osi_core->base + MGBE_MAC_ISR); - /* TODO: Duplex/speed settigs - Its not same as EQOS for MGBE */ + return; } +#ifndef OSI_STRIPPED_LIB /** * @brief mgbe_update_dma_sr_stats - stats for dma_status error * @@ -3625,31 +2328,32 @@ static inline void mgbe_update_dma_sr_stats(struct osi_core_priv_data *osi_core, nveu64_t val; if ((dma_sr & MGBE_DMA_CHX_STATUS_RBU) == MGBE_DMA_CHX_STATUS_RBU) { - val = osi_core->xstats.rx_buf_unavail_irq_n[qinx]; - osi_core->xstats.rx_buf_unavail_irq_n[qinx] = + val = osi_core->stats.rx_buf_unavail_irq_n[qinx]; + osi_core->stats.rx_buf_unavail_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & MGBE_DMA_CHX_STATUS_TPS) == MGBE_DMA_CHX_STATUS_TPS) { - val = osi_core->xstats.tx_proc_stopped_irq_n[qinx]; - osi_core->xstats.tx_proc_stopped_irq_n[qinx] = + val = osi_core->stats.tx_proc_stopped_irq_n[qinx]; + osi_core->stats.tx_proc_stopped_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & MGBE_DMA_CHX_STATUS_TBU) == MGBE_DMA_CHX_STATUS_TBU) { - val = osi_core->xstats.tx_buf_unavail_irq_n[qinx]; - osi_core->xstats.tx_buf_unavail_irq_n[qinx] = + val = osi_core->stats.tx_buf_unavail_irq_n[qinx]; + osi_core->stats.tx_buf_unavail_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & MGBE_DMA_CHX_STATUS_RPS) == MGBE_DMA_CHX_STATUS_RPS) { - val = osi_core->xstats.rx_proc_stopped_irq_n[qinx]; - osi_core->xstats.rx_proc_stopped_irq_n[qinx] = + val = osi_core->stats.rx_proc_stopped_irq_n[qinx]; + 
osi_core->stats.rx_proc_stopped_irq_n[qinx] = osi_update_stats_counter(val, 1U); } if ((dma_sr & MGBE_DMA_CHX_STATUS_FBE) == MGBE_DMA_CHX_STATUS_FBE) { - val = osi_core->xstats.fatal_bus_error_irq_n; - osi_core->xstats.fatal_bus_error_irq_n = + val = osi_core->stats.fatal_bus_error_irq_n; + osi_core->stats.fatal_bus_error_irq_n = osi_update_stats_counter(val, 1U); } } +#endif /* !OSI_STRIPPED_LIB */ /** * @brief mgbe_set_avb_algorithm - Set TxQ/TC avb config @@ -3674,65 +2378,65 @@ static inline void mgbe_update_dma_sr_stats(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_set_avb_algorithm( +static nve32_t mgbe_set_avb_algorithm( struct osi_core_priv_data *const osi_core, const struct osi_core_avb_algorithm *const avb) { - unsigned int value; - int ret = -1; - unsigned int qinx = 0U; - unsigned int tcinx = 0U; + nveu32_t value; + nve32_t ret = -1; + nveu32_t qinx = 0U; + nveu32_t tcinx = 0U; if (avb == OSI_NULL) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "avb structure is NULL\n", 0ULL); - return ret; + goto done; } /* queue index in range */ if (avb->qindex >= OSI_MGBE_MAX_NUM_QUEUES) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Queue index\n", - (unsigned long long)avb->qindex); - return ret; + (nveul64_t)avb->qindex); + goto done; } /* queue oper_mode in range check*/ if (avb->oper_mode >= OSI_MTL_QUEUE_MODEMAX) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Queue mode\n", - (unsigned long long)avb->qindex); - return ret; + (nveul64_t)avb->qindex); + goto done; } /* Validate algo is valid */ if (avb->algo > OSI_MTL_TXQ_AVALG_CBS) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Algo input\n", - (unsigned long long)avb->tcindex); - return ret; + (nveul64_t)avb->algo); + goto done; } /* can't set AVB mode for queue 0 */ if ((avb->qindex == 0U) && (avb->oper_mode == OSI_MTL_QUEUE_AVB)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OPNOTSUPP, "Not allowed to set AVB 
for Q0\n", - (unsigned long long)avb->qindex); - return ret; + (nveul64_t)avb->qindex); + goto done; } /* TC index range check */ if ((avb->tcindex == 0U) || (avb->tcindex >= OSI_MAX_TC_NUM)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Queue TC mapping\n", - (unsigned long long)avb->tcindex); - return ret; + (nveul64_t)avb->tcindex); + goto done; } qinx = avb->qindex; tcinx = avb->tcindex; - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_CHX_TX_OP_MODE(qinx)); value &= ~MGBE_MTL_TX_OP_MODE_TXQEN; /* Set TXQEN mode as per input struct after masking 3 bit */ @@ -3742,54 +2446,77 @@ static int mgbe_set_avb_algorithm( value &= ~MGBE_MTL_TX_OP_MODE_Q2TCMAP; value |= ((tcinx << MGBE_MTL_TX_OP_MODE_Q2TCMAP_SHIFT) & MGBE_MTL_TX_OP_MODE_Q2TCMAP); - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_CHX_TX_OP_MODE(qinx)); /* Set Algo and Credit control */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_CR(tcinx)); + value &= ~MGBE_MTL_TCQ_ETS_CR_AVALG; + value &= ~MGBE_MTL_TCQ_ETS_CR_CC; if (avb->algo == OSI_MTL_TXQ_AVALG_CBS) { - value &= ~MGBE_MTL_TCQ_ETS_CR_CC; value |= (avb->credit_control << MGBE_MTL_TCQ_ETS_CR_CC_SHIFT) & MGBE_MTL_TCQ_ETS_CR_CC; + value |= (OSI_MTL_TXQ_AVALG_CBS << MGBE_MTL_TCQ_ETS_CR_AVALG_SHIFT) & + MGBE_MTL_TCQ_ETS_CR_AVALG; + } else { + value |= (OSI_MGBE_TXQ_AVALG_ETS << MGBE_MTL_TCQ_ETS_CR_AVALG_SHIFT) & + MGBE_MTL_TCQ_ETS_CR_AVALG; } - value &= ~MGBE_MTL_TCQ_ETS_CR_AVALG; - value |= (avb->algo << MGBE_MTL_TCQ_ETS_CR_AVALG_SHIFT) & - MGBE_MTL_TCQ_ETS_CR_AVALG; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_CR(tcinx)); if (avb->algo == OSI_MTL_TXQ_AVALG_CBS) { /* Set Idle slope credit*/ 
- value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_QW(tcinx)); value &= ~MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK; value |= avb->idle_slope & MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_QW(tcinx)); /* Set Send slope credit */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_SSCR(tcinx)); value &= ~MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK; value |= avb->send_slope & MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_SSCR(tcinx)); /* Set Hi credit */ value = avb->hi_credit & MGBE_MTL_TCQ_ETS_HCR_HC_MASK; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_HCR(tcinx)); - /* low credit is -ve number, osi_write need a unsigned int + /* low credit is -ve number, osi_write need a nveu32_t * take only 28:0 bits from avb->low_credit */ value = avb->low_credit & MGBE_MTL_TCQ_ETS_LCR_LC_MASK; - osi_writela(osi_core, value, (unsigned char *)osi_core->base + + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_LCR(tcinx)); + } else { + /* Reset register values to POR/initialized values */ + osi_writela(osi_core, MGBE_MTL_TCQ_QW_ISCQW, (nveu8_t *)osi_core->base + + MGBE_MTL_TCQ_QW(tcinx)); + osi_writela(osi_core, OSI_DISABLE, (nveu8_t *)osi_core->base + + MGBE_MTL_TCQ_ETS_SSCR(tcinx)); + osi_writela(osi_core, OSI_DISABLE, (nveu8_t *)osi_core->base + + MGBE_MTL_TCQ_ETS_HCR(tcinx)); + osi_writela(osi_core, OSI_DISABLE, (nveu8_t *)osi_core->base + + MGBE_MTL_TCQ_ETS_LCR(tcinx)); + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + + MGBE_MTL_CHX_TX_OP_MODE(qinx)); + 
value &= ~MGBE_MTL_TX_OP_MODE_Q2TCMAP; + value |= (osi_core->tc[qinx] << MGBE_MTL_CHX_TX_OP_MODE_Q2TC_SH); + osi_writela(osi_core, value, (nveu8_t *)osi_core->base + + MGBE_MTL_CHX_TX_OP_MODE(qinx)); } - return 0; + ret = 0; + +done: + return ret; } /** @@ -3815,30 +2542,32 @@ static int mgbe_set_avb_algorithm( * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_get_avb_algorithm(struct osi_core_priv_data *const osi_core, - struct osi_core_avb_algorithm *const avb) +static nve32_t mgbe_get_avb_algorithm(struct osi_core_priv_data *const osi_core, + struct osi_core_avb_algorithm *const avb) { - unsigned int value; - int ret = -1; - unsigned int qinx = 0U; - unsigned int tcinx = 0U; + nveu32_t value; + nve32_t ret = 0; + nveu32_t qinx = 0U; + nveu32_t tcinx = 0U; if (avb == OSI_NULL) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "avb structure is NULL\n", 0ULL); - return ret; + ret = -1; + goto fail; } if (avb->qindex >= OSI_MGBE_MAX_NUM_QUEUES) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Queue index\n", - (unsigned long long)avb->qindex); - return ret; + (nveul64_t)avb->qindex); + ret = -1; + goto fail; } qinx = avb->qindex; - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_CHX_TX_OP_MODE(qinx)); /* Get TxQ/TC mode as per input struct after masking 3:2 bit */ @@ -3851,7 +2580,7 @@ static int mgbe_get_avb_algorithm(struct osi_core_priv_data *const osi_core, tcinx = avb->tcindex; /* Get Algo and Credit control */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_CR(tcinx)); avb->credit_control = (value & MGBE_MTL_TCQ_ETS_CR_CC) >> MGBE_MTL_TCQ_ETS_CR_CC_SHIFT; @@ -3860,29 +2589,29 @@ static int mgbe_get_avb_algorithm(struct osi_core_priv_data *const osi_core, if (avb->algo == OSI_MTL_TXQ_AVALG_CBS) { /* Get Idle slope credit*/ - value = osi_readla(osi_core, 
(unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_QW(tcinx)); avb->idle_slope = value & MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK; /* Get Send slope credit */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_SSCR(tcinx)); avb->send_slope = value & MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK; /* Get Hi credit */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_HCR(tcinx)); avb->hi_credit = value & MGBE_MTL_TCQ_ETS_HCR_HC_MASK; /* Get Low credit for which bit 31:29 are unknown * return 28:0 valid bits to application */ - value = osi_readla(osi_core, (unsigned char *)osi_core->base + + value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_TCQ_ETS_LCR(tcinx)); avb->low_credit = value & MGBE_MTL_TCQ_ETS_LCR_LC_MASK; } - - return 0; +fail: + return ret; } /** @@ -3898,44 +2627,47 @@ static int mgbe_get_avb_algorithm(struct osi_core_priv_data *const osi_core, * There is one status interrupt which says swich to SWOL complete. * * @param[in] osi_core: osi core priv data structure + * @param[in] mtl_isr: MTL interrupt status value * * @note MAC should be init and started. 
see osi_start_mac() */ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, - unsigned int mtl_isr) -{ - unsigned int val = 0U; - unsigned int sch_err = 0U; - unsigned int frm_err = 0U; - unsigned int temp = 0U; - unsigned int i = 0; - unsigned long stat_val = 0U; - unsigned int value = 0U; - unsigned int qstatus = 0U; - unsigned int qinx = 0U; + nveu32_t mtl_isr) +{ + nveu32_t val = 0U; + nveu32_t sch_err = 0U; + nveu32_t frm_err = 0U; + nveu32_t temp = 0U; + nveu32_t i = 0; + nveul64_t stat_val = 0U; + nveu32_t value = 0U; + nveu32_t qstatus = 0U; + nveu32_t qinx = 0U; /* Check for all MTL queues */ for (i = 0; i < osi_core->num_mtl_queues; i++) { qinx = osi_core->mtl_queues[i]; - if (mtl_isr & OSI_BIT(qinx)) { + if ((mtl_isr & OSI_BIT(qinx)) == OSI_BIT(qinx)) { /* check if Q has underflow error */ - qstatus = osi_readl((unsigned char *)osi_core->base + + qstatus = osi_readl((nveu8_t *)osi_core->base + MGBE_MTL_QINT_STATUS(qinx)); /* Transmit Queue Underflow Interrupt Status */ - if (qstatus & MGBE_MTL_QINT_TXUNIFS) { - osi_core->pkt_err_stats.mgbe_tx_underflow_err = + if ((qstatus & MGBE_MTL_QINT_TXUNIFS) == MGBE_MTL_QINT_TXUNIFS) { +#ifndef OSI_STRIPPED_LIB + osi_core->stats.mgbe_tx_underflow_err = osi_update_stats_counter( - osi_core->pkt_err_stats.mgbe_tx_underflow_err, + osi_core->stats.mgbe_tx_underflow_err, 1UL); +#endif /* !OSI_STRIPPED_LIB */ } /* Clear interrupt status by writing back with 1 */ - osi_writel(1U, (unsigned char *)osi_core->base + + osi_writel(1U, (nveu8_t *)osi_core->base + MGBE_MTL_QINT_STATUS(qinx)); } } if ((mtl_isr & MGBE_MTL_IS_ESTIS) != MGBE_MTL_IS_ESTIS) { - return; + goto done; } val = osi_readla(osi_core, @@ -3946,21 +2678,21 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, /* return if interrupt is not related to EST */ if (val == OSI_DISABLE) { - return; + goto done; } /* increase counter write 1 back will clear */ if ((val & MGBE_MTL_EST_STATUS_CGCE) == MGBE_MTL_EST_STATUS_CGCE) { 
osi_core->est_ready = OSI_DISABLE; - stat_val = osi_core->tsn_stats.const_gate_ctr_err; - osi_core->tsn_stats.const_gate_ctr_err = + stat_val = osi_core->stats.const_gate_ctr_err; + osi_core->stats.const_gate_ctr_err = osi_update_stats_counter(stat_val, 1U); } if ((val & MGBE_MTL_EST_STATUS_HLBS) == MGBE_MTL_EST_STATUS_HLBS) { osi_core->est_ready = OSI_DISABLE; - stat_val = osi_core->tsn_stats.head_of_line_blk_sch; - osi_core->tsn_stats.head_of_line_blk_sch = + stat_val = osi_core->stats.head_of_line_blk_sch; + osi_core->stats.head_of_line_blk_sch = osi_update_stats_counter(stat_val, 1U); /* Need to read MTL_EST_Sch_Error register and cleared */ sch_err = osi_readla(osi_core, (nveu8_t *)osi_core->base + @@ -3969,28 +2701,28 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, temp = OSI_ENABLE; temp = temp << i; if ((sch_err & temp) == temp) { - stat_val = osi_core->tsn_stats.hlbs_q[i]; - osi_core->tsn_stats.hlbs_q[i] = + stat_val = osi_core->stats.hlbs_q[i]; + osi_core->stats.hlbs_q[i] = osi_update_stats_counter(stat_val, 1U); } } sch_err &= 0xFFU; //only 8 TC allowed so clearing all osi_writela(osi_core, sch_err, (nveu8_t *)osi_core->base + MGBE_MTL_EST_SCH_ERR); - /* Reset EST with print to configure it properly */ + /* Reset EST with prnve32_t to configure it properly */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_EST_CONTROL); value &= ~MGBE_MTL_EST_EEST; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_EST_CONTROL); - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Disabling EST due to HLBS, correct GCL\n", OSI_NONE); } if ((val & MGBE_MTL_EST_STATUS_HLBF) == MGBE_MTL_EST_STATUS_HLBF) { osi_core->est_ready = OSI_DISABLE; - stat_val = osi_core->tsn_stats.head_of_line_blk_frm; - osi_core->tsn_stats.head_of_line_blk_frm = + stat_val = osi_core->stats.head_of_line_blk_frm; + osi_core->stats.head_of_line_blk_frm = osi_update_stats_counter(stat_val, 1U); /* 
Need to read MTL_EST_Frm_Size_Error register and cleared */ frm_err = osi_readla(osi_core, (nveu8_t *)osi_core->base + @@ -3999,8 +2731,8 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, temp = OSI_ENABLE; temp = temp << i; if ((frm_err & temp) == temp) { - stat_val = osi_core->tsn_stats.hlbf_q[i]; - osi_core->tsn_stats.hlbf_q[i] = + stat_val = osi_core->stats.hlbf_q[i]; + osi_core->stats.hlbf_q[i] = osi_update_stats_counter(stat_val, 1U); } } @@ -4008,7 +2740,7 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, osi_writela(osi_core, frm_err, (nveu8_t *)osi_core->base + MGBE_MTL_EST_FRMS_ERR); - /* Reset EST with print to configure it properly */ + /* Reset EST with prnve32_t to configure it properly */ value = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MTL_EST_CONTROL); /* DDBF 1 means don't drop packets */ @@ -4017,7 +2749,7 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, value &= ~MGBE_MTL_EST_EEST; osi_writela(osi_core, value, (nveu8_t *)osi_core->base + MGBE_MTL_EST_CONTROL); - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Disabling EST due to HLBF, correct GCL\n", OSI_NONE); } @@ -4028,15 +2760,15 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, MGBE_MTL_EST_STATUS_BTRE) { osi_core->est_ready = OSI_ENABLE; } - stat_val = osi_core->tsn_stats.sw_own_list_complete; - osi_core->tsn_stats.sw_own_list_complete = + stat_val = osi_core->stats.sw_own_list_complete; + osi_core->stats.sw_own_list_complete = osi_update_stats_counter(stat_val, 1U); } if ((val & MGBE_MTL_EST_STATUS_BTRE) == MGBE_MTL_EST_STATUS_BTRE) { osi_core->est_ready = OSI_DISABLE; - stat_val = osi_core->tsn_stats.base_time_reg_err; - osi_core->tsn_stats.base_time_reg_err = + stat_val = osi_core->stats.base_time_reg_err; + osi_core->stats.base_time_reg_err = osi_update_stats_counter(stat_val, 1U); osi_core->est_ready = OSI_DISABLE; } @@ -4044,11 
+2776,12 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, osi_writela(osi_core, val, (nveu8_t *)osi_core->base + MGBE_MTL_EST_STATUS); - mtl_isr &= ~MGBE_MTL_IS_ESTIS; - osi_writela(osi_core, mtl_isr, (unsigned char *)osi_core->base + - MGBE_MTL_INTR_STATUS); +done: + return; } +#ifndef OSI_STRIPPED_LIB + /** * @brief mgbe_config_ptp_offload - Enable/Disable PTP offload * @@ -4064,17 +2797,17 @@ static void mgbe_handle_mtl_intrs(struct osi_core_priv_data *osi_core, * @retval -1 on failure. */ -static int mgbe_config_ptp_offload(struct osi_core_priv_data *const osi_core, - struct osi_pto_config *const pto_config) +static nve32_t mgbe_config_ptp_offload(struct osi_core_priv_data *const osi_core, + struct osi_pto_config *const pto_config) { - unsigned char *addr = (unsigned char *)osi_core->base; - int ret = 0; - unsigned int value = 0x0U; - unsigned int ptc_value = 0x0U; - unsigned int port_id = 0x0U; + nveu8_t *addr = (nveu8_t *)osi_core->base; + nve32_t ret = 0; + nveu32_t value = 0x0U; + nveu32_t ptc_value = 0x0U; + nveu32_t port_id = 0x0U; /* Read MAC TCR */ - value = osi_readla(osi_core, (unsigned char *)addr + MGBE_MAC_TCR); + value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_TCR); /* clear old configuration */ value &= ~(MGBE_MAC_TCR_TSENMACADDR | OSI_MAC_TCR_SNAPTYPSEL_3 | @@ -4153,6 +2886,7 @@ static int mgbe_config_ptp_offload(struct osi_core_priv_data *const osi_core, return ret; } +#endif /* !OSI_STRIPPED_LIB */ #ifdef HSI_SUPPORT /** @@ -4172,13 +2906,19 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core) nveu32_t val2 = 0; void *xpcs_base = osi_core->xpcs_base; nveu64_t ce_count_threshold; + const nveu32_t osi_hsi_err_code[][2] = { + {OSI_HSI_MGBE0_UE_CODE, OSI_HSI_MGBE0_CE_CODE}, + {OSI_HSI_MGBE1_UE_CODE, OSI_HSI_MGBE1_CE_CODE}, + {OSI_HSI_MGBE2_UE_CODE, OSI_HSI_MGBE2_CE_CODE}, + {OSI_HSI_MGBE3_UE_CODE, OSI_HSI_MGBE3_CE_CODE}, + }; val = osi_readla(osi_core, (nveu8_t *)osi_core->base + 
MGBE_WRAP_COMMON_INTR_STATUS); if (((val & MGBE_REGISTER_PARITY_ERR) == MGBE_REGISTER_PARITY_ERR) || ((val & MGBE_CORE_UNCORRECTABLE_ERR) == MGBE_CORE_UNCORRECTABLE_ERR)) { osi_core->hsi.err_code[UE_IDX] = - hsi_err_code[osi_core->instance_id][UE_IDX]; + osi_hsi_err_code[osi_core->instance_id][UE_IDX]; osi_core->hsi.report_err = OSI_ENABLE; osi_core->hsi.report_count_err[UE_IDX] = OSI_ENABLE; /* Disable the interrupt */ @@ -4191,7 +2931,7 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core) } if ((val & MGBE_CORE_CORRECTABLE_ERR) == MGBE_CORE_CORRECTABLE_ERR) { osi_core->hsi.err_code[CE_IDX] = - hsi_err_code[osi_core->instance_id][CE_IDX]; + osi_hsi_err_code[osi_core->instance_id][CE_IDX]; osi_core->hsi.report_err = OSI_ENABLE; osi_core->hsi.ce_count = osi_update_stats_counter(osi_core->hsi.ce_count, 1UL); @@ -4230,7 +2970,7 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core) XPCS_WRAP_INTERRUPT_STATUS); if (((val & XPCS_CORE_UNCORRECTABLE_ERR) == XPCS_CORE_UNCORRECTABLE_ERR) || ((val & XPCS_REGISTER_PARITY_ERR) == XPCS_REGISTER_PARITY_ERR)) { - osi_core->hsi.err_code[UE_IDX] = hsi_err_code[osi_core->instance_id][UE_IDX]; + osi_core->hsi.err_code[UE_IDX] = osi_hsi_err_code[osi_core->instance_id][UE_IDX]; osi_core->hsi.report_err = OSI_ENABLE; osi_core->hsi.report_count_err[UE_IDX] = OSI_ENABLE; /* Disable uncorrectable interrupts */ @@ -4242,7 +2982,7 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core) XPCS_WRAP_INTERRUPT_CONTROL); } if ((val & XPCS_CORE_CORRECTABLE_ERR) == XPCS_CORE_CORRECTABLE_ERR) { - osi_core->hsi.err_code[CE_IDX] = hsi_err_code[osi_core->instance_id][CE_IDX]; + osi_core->hsi.err_code[CE_IDX] = osi_hsi_err_code[osi_core->instance_id][CE_IDX]; osi_core->hsi.report_err = OSI_ENABLE; osi_core->hsi.ce_count = osi_update_stats_counter(osi_core->hsi.ce_count, 1UL); @@ -4280,16 +3020,16 @@ static void mgbe_handle_hsi_intr(struct osi_core_priv_data *osi_core) * * @note MAC should be init and 
started. see osi_start_mac() */ -static void mgbe_handle_common_intr(struct osi_core_priv_data *osi_core) +static void mgbe_handle_common_intr(struct osi_core_priv_data *const osi_core) { void *base = osi_core->base; - unsigned int dma_isr = 0; - unsigned int qinx = 0; - unsigned int i = 0; - unsigned int dma_sr = 0; - unsigned int dma_ier = 0; - unsigned int mtl_isr = 0; - unsigned int val = 0; + nveu32_t dma_isr = 0; + nveu32_t qinx = 0; + nveu32_t i = 0; + nveu32_t dma_sr = 0; + nveu32_t dma_ier = 0; + nveu32_t mtl_isr = 0; + nveu32_t val = 0; #ifdef HSI_SUPPORT if (osi_core->hsi.enabled == OSI_ENABLE) { @@ -4298,7 +3038,7 @@ static void mgbe_handle_common_intr(struct osi_core_priv_data *osi_core) #endif dma_isr = osi_readla(osi_core, (nveu8_t *)base + MGBE_DMA_ISR); if (dma_isr == OSI_NONE) { - return; + goto done; } //FIXME Need to check how we can get the DMA channel here instead of @@ -4334,26 +3074,31 @@ static void mgbe_handle_common_intr(struct osi_core_priv_data *osi_core) /* ack non ti/ri nve32_ts */ osi_writela(osi_core, dma_sr, (nveu8_t *)base + MGBE_DMA_CHX_STATUS(qinx)); +#ifndef OSI_STRIPPED_LIB mgbe_update_dma_sr_stats(osi_core, dma_sr, qinx); +#endif /* !OSI_STRIPPED_LIB */ } } - mgbe_handle_mac_intrs(osi_core, dma_isr); + /* Handle MAC interrupts */ + if ((dma_isr & MGBE_DMA_ISR_MACIS) == MGBE_DMA_ISR_MACIS) { + mgbe_handle_mac_intrs(osi_core); + } /* Handle MTL inerrupts */ mtl_isr = osi_readla(osi_core, - (unsigned char *)base + MGBE_MTL_INTR_STATUS); + (nveu8_t *)base + MGBE_MTL_INTR_STATUS); if ((dma_isr & MGBE_DMA_ISR_MTLIS) == MGBE_DMA_ISR_MTLIS) { mgbe_handle_mtl_intrs(osi_core, mtl_isr); } /* Clear common interrupt status in wrapper register */ osi_writela(osi_core, MGBE_MAC_SBD_INTR, - (unsigned char *)base + MGBE_WRAP_COMMON_INTR_STATUS); - val = osi_readla(osi_core, (unsigned char *)osi_core->base + + (nveu8_t *)base + MGBE_WRAP_COMMON_INTR_STATUS); + val = osi_readla(osi_core, (nveu8_t *)osi_core->base + 
MGBE_WRAP_COMMON_INTR_ENABLE); val |= MGBE_MAC_SBD_INTR; - osi_writela(osi_core, val, (unsigned char *)osi_core->base + + osi_writela(osi_core, val, (nveu8_t *)osi_core->base + MGBE_WRAP_COMMON_INTR_ENABLE); /* Clear FRP Interrupts in MTL_RXP_Interrupt_Control_Status */ @@ -4363,6 +3108,9 @@ static void mgbe_handle_common_intr(struct osi_core_priv_data *osi_core) MGBE_MTL_RXP_INTR_CS_FOOVIS | MGBE_MTL_RXP_INTR_CS_PDRFIS); osi_writela(osi_core, val, (nveu8_t *)base + MGBE_MTL_RXP_INTR_CS); + +done: + return; } /** @@ -4381,58 +3129,7 @@ static nve32_t mgbe_pad_calibrate(OSI_UNUSED return 0; } -/** - * @brief mgbe_start_mac - Start MAC Tx/Rx engine - * - * Algorithm: Enable MAC Transmitter and Receiver - * - * @param[in] osi_core: OSI core private data structure. - * - * @note 1) MAC init should be complete. See osi_hw_core_init() and - * osi_hw_dma_init() - */ -static void mgbe_start_mac(struct osi_core_priv_data *const osi_core) -{ - nveu32_t value; - void *addr = osi_core->base; - - value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_TMCR); - /* Enable MAC Transmit */ - value |= MGBE_MAC_TMCR_TE; - osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_TMCR); - - value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_RMCR); - /* Enable MAC Receive */ - value |= MGBE_MAC_RMCR_RE; - osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_RMCR); -} - -/** - * @brief mgbe_stop_mac - Stop MAC Tx/Rx engine - * - * Algorithm: Disables MAC Transmitter and Receiver - * - * @param[in] osi_core: OSI core private data structure. - * - * @note MAC DMA deinit should be complete. 
See osi_hw_dma_deinit() - */ -static void mgbe_stop_mac(struct osi_core_priv_data *const osi_core) -{ - nveu32_t value; - void *addr = osi_core->base; - - value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_TMCR); - /* Disable MAC Transmit */ - value &= ~MGBE_MAC_TMCR_TE; - osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_TMCR); - - value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_RMCR); - /* Disable MAC Receive */ - value &= ~MGBE_MAC_RMCR_RE; - osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_RMCR); -} - -#ifdef MACSEC_SUPPORT +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) /** * @brief mgbe_config_mac_tx - Enable/Disable MAC Tx * @@ -4440,90 +3137,28 @@ static void mgbe_stop_mac(struct osi_core_priv_data *const osi_core) * * @param[in] osi_core: OSI core private data structure. * @param[in] enable: Enable or Disable.MAC Tx - * - * @note 1) MAC init should be complete. See osi_hw_core_init() - */ -static void mgbe_config_mac_tx(struct osi_core_priv_data *const osi_core, - const nveu32_t enable) -{ - nveu32_t value; - void *addr = osi_core->base; - - if (enable == OSI_ENABLE) { - value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_TMCR); - /* Enable MAC Transmit */ - value |= MGBE_MAC_TMCR_TE; - osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_TMCR); - } else { - value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_TMCR); - /* Disable MAC Transmit */ - value &= ~MGBE_MAC_TMCR_TE; - osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_TMCR); - } -} -#endif /* MACSEC_SUPPORT */ - -/** - * @brief mgbe_core_deinit - MGBE MAC core deinitialization - * - * Algorithm: This function will take care of deinitializing MAC - * - * @param[in] osi_core: OSI core private data structure. 
- * - * @note Required clks and resets has to be enabled - */ -static void mgbe_core_deinit(struct osi_core_priv_data *osi_core) -{ - /* Stop the MAC by disabling both MAC Tx and Rx */ - mgbe_stop_mac(osi_core); -} - -/** - * @brief mgbe_set_speed - Set operating speed - * - * Algorithm: Based on the speed (2.5G/5G/10G) MAC will be configured - * accordingly. - * - * @param[in] osi_core: OSI core private data. - * @param[in] speed: Operating speed. - * - * @note MAC should be init and started. see osi_start_mac() - */ -static int mgbe_set_speed(struct osi_core_priv_data *const osi_core, - const int speed) -{ - unsigned int value = 0; - - value = osi_readla(osi_core, - (unsigned char *) osi_core->base + MGBE_MAC_TMCR); - - switch (speed) { - case OSI_SPEED_2500: - value |= MGBE_MAC_TMCR_SS_2_5G; - break; - case OSI_SPEED_5000: - value |= MGBE_MAC_TMCR_SS_5G; - break; - case OSI_SPEED_10000: - value &= ~MGBE_MAC_TMCR_SS_10G; - break; - default: - /* setting default to 10G */ - value &= ~MGBE_MAC_TMCR_SS_10G; - break; - } - - osi_writela(osi_core, value, (unsigned char *) - osi_core->base + MGBE_MAC_TMCR); + * + * @note 1) MAC init should be complete. 
See osi_hw_core_init() + */ +static void mgbe_config_mac_tx(struct osi_core_priv_data *const osi_core, + const nveu32_t enable) +{ + nveu32_t value; + void *addr = osi_core->base; - if (xpcs_init(osi_core) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "xpcs_init failed\n", OSI_NONE); - return -1; + if (enable == OSI_ENABLE) { + value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_TMCR); + /* Enable MAC Transmit */ + value |= MGBE_MAC_TMCR_TE; + osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_TMCR); + } else { + value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_TMCR); + /* Disable MAC Transmit */ + value &= ~MGBE_MAC_TMCR_TE; + osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_TMCR); } - - return xpcs_start(osi_core); } +#endif /* MACSEC_SUPPORT */ /** * @brief mgbe_mdio_busy_wait - MDIO busy wait loop @@ -4532,23 +3167,25 @@ static int mgbe_set_speed(struct osi_core_priv_data *const osi_core, * * @param[in] osi_core: OSI core data struture. */ -static int mgbe_mdio_busy_wait(struct osi_core_priv_data *const osi_core) +static nve32_t mgbe_mdio_busy_wait(struct osi_core_priv_data *const osi_core) { /* half second timeout */ - unsigned int retry = 50000; - unsigned int mac_gmiiar; - unsigned int count; - int cond = 1; + nveu32_t retry = 50000; + nveu32_t mac_gmiiar; + nveu32_t count; + nve32_t cond = 1; + nve32_t ret = 0; count = 0; while (cond == 1) { if (count > retry) { - return -1; + ret = -1; + goto fail; } count++; - mac_gmiiar = osi_readla(osi_core, (unsigned char *) + mac_gmiiar = osi_readla(osi_core, (nveu8_t *) osi_core->base + MGBE_MDIO_SCCD); if ((mac_gmiiar & MGBE_MDIO_SCCD_SBUSY) == 0U) { cond = 0; @@ -4556,169 +3193,7 @@ static int mgbe_mdio_busy_wait(struct osi_core_priv_data *const osi_core) osi_core->osd_ops.udelay(10U); } } - - return 0; -} - -/* - * @brief mgbe_save_registers Function to store a backup of - * MAC register space during SOC suspend. 
- * - * Algorithm: Read registers to be backed up as per struct core_backup and - * store the register values in memory. - * - * @param[in] osi_core: OSI core private data structure. - * - * @retval 0 on success - * @retval -1 on failure. - */ -static inline int mgbe_save_registers( - struct osi_core_priv_data *const osi_core) -{ - unsigned int i = 0; - struct core_backup *config = &osi_core->backup_config; - int ret = 0; - - /* Save direct access registers */ - for (i = 0; i < MGBE_DIRECT_MAX_BAK_IDX; i++) { - if (config->reg_addr[i] != OSI_NULL) { - /* Read the register and store into reg_val */ - config->reg_val[i] = osi_readla(osi_core, - config->reg_addr[i]); - } - } - - /* Save L3 and L4 indirect addressing registers */ - for (i = 0; i < OSI_MGBE_MAX_L3_L4_FILTER; i++) { - ret = mgbe_l3l4_filter_read(osi_core, i, MGBE_MAC_L3L4_CTR, - &config->reg_val[MGBE_MAC_L3L4_CTR_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3L4_CTR read fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_read(osi_core, i, MGBE_MAC_L4_ADDR, - &config->reg_val[MGBE_MAC_L4_ADR_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L4_ADDR read fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_read(osi_core, i, MGBE_MAC_L3_AD0R, - &config->reg_val[MGBE_MAC_L3_AD0R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD0R read fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_read(osi_core, i, MGBE_MAC_L3_AD1R, - &config->reg_val[MGBE_MAC_L3_AD1R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD1R read fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_read(osi_core, i, MGBE_MAC_L3_AD2R, - &config->reg_val[MGBE_MAC_L3_AD2R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD2R read fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_read(osi_core, i, MGBE_MAC_L3_AD3R, - &config->reg_val[MGBE_MAC_L3_AD3R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD3R read fail return here */ - return ret; - } - } - - /* Save MAC_DChSel_IndReg indirect 
addressing registers */ - for (i = 0; i < OSI_MGBE_MAX_MAC_ADDRESS_FILTER; i++) { - ret = mgbe_mac_indir_addr_read(osi_core, MGBE_MAC_DCHSEL, - i, &config->reg_val[MGBE_MAC_DCHSEL_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_DCHSEL read fail return here */ - return ret; - } - } - - return ret; -} - -/** - * @brief mgbe_restore_registers Function to restore the backup of - * MAC registers during SOC resume. - * - * Algorithm: Restore the register values from the in memory backup taken using - * mgbe_save_registers(). - * - * @param[in] osi_core: OSI core private data structure. - * - * @retval 0 on success - * @retval -1 on failure. - */ -static inline int mgbe_restore_registers( - struct osi_core_priv_data *const osi_core) -{ - unsigned int i = 0; - struct core_backup *config = &osi_core->backup_config; - int ret = 0; - - /* Restore direct access registers */ - for (i = 0; i < MGBE_MAX_BAK_IDX; i++) { - if (config->reg_addr[i] != OSI_NULL) { - /* Write back the saved register value */ - osi_writela(osi_core, config->reg_val[i], - config->reg_addr[i]); - } - } - - /* Restore L3 and L4 indirect addressing registers */ - for (i = 0; i < OSI_MGBE_MAX_L3_L4_FILTER; i++) { - ret = mgbe_l3l4_filter_write(osi_core, i, MGBE_MAC_L3L4_CTR, - config->reg_val[MGBE_MAC_L3L4_CTR_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3L4_CTR write fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_write(osi_core, i, MGBE_MAC_L4_ADDR, - config->reg_val[MGBE_MAC_L4_ADR_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L4_ADDR write fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_write(osi_core, i, MGBE_MAC_L3_AD0R, - config->reg_val[MGBE_MAC_L3_AD0R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD0R write fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_write(osi_core, i, MGBE_MAC_L3_AD1R, - config->reg_val[MGBE_MAC_L3_AD1R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD1R write fail return here */ - return ret; - } - ret = 
mgbe_l3l4_filter_write(osi_core, i, MGBE_MAC_L3_AD2R, - config->reg_val[MGBE_MAC_L3_AD2R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD2R write fail return here */ - return ret; - } - ret = mgbe_l3l4_filter_write(osi_core, i, MGBE_MAC_L3_AD3R, - config->reg_val[MGBE_MAC_L3_AD3R_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_L3_AD3R write fail return here */ - return ret; - } - } - - /* Restore MAC_DChSel_IndReg indirect addressing registers */ - for (i = 0; i < OSI_MGBE_MAX_MAC_ADDRESS_FILTER; i++) { - ret = mgbe_mac_indir_addr_write(osi_core, MGBE_MAC_DCHSEL, - i, config->reg_val[MGBE_MAC_DCHSEL_BAK_IDX(i)]); - if (ret < 0) { - /* MGBE_MAC_DCHSEL write fail return here */ - return ret; - } - } - +fail: return ret; } @@ -4737,13 +3212,13 @@ static inline int mgbe_restore_registers( * @retval 0 on success * @retval -1 on failure. */ -static int mgbe_write_phy_reg(struct osi_core_priv_data *osi_core, - unsigned int phyaddr, - unsigned int phyreg, - unsigned short phydata) +static nve32_t mgbe_write_phy_reg(struct osi_core_priv_data *const osi_core, + const nveu32_t phyaddr, + const nveu32_t phyreg, + const nveu16_t phydata) { - int ret = 0; - unsigned int reg; + nve32_t ret = 0; + nveu32_t reg; /* Wait for any previous MII read/write operation to complete */ ret = mgbe_mdio_busy_wait(osi_core); @@ -4752,7 +3227,7 @@ static int mgbe_write_phy_reg(struct osi_core_priv_data *osi_core, OSI_LOG_ARG_HW_FAIL, "MII operation timed out\n", 0ULL); - return ret; + goto fail; } /* set MDIO address register */ @@ -4762,12 +3237,12 @@ static int mgbe_write_phy_reg(struct osi_core_priv_data *osi_core, /* set port address and register address */ reg |= (phyaddr << MGBE_MDIO_SCCA_PA_SHIFT) | (phyreg & MGBE_MDIO_SCCA_RA_MASK); - osi_writela(osi_core, reg, (unsigned char *) + osi_writela(osi_core, reg, (nveu8_t *) osi_core->base + MGBE_MDIO_SCCA); /* Program Data register */ reg = phydata | - (MGBE_MDIO_SCCD_CMD_WR << MGBE_MDIO_SCCD_CMD_SHIFT) | + (((nveu32_t)MGBE_MDIO_SCCD_CMD_WR) 
<< MGBE_MDIO_SCCD_CMD_SHIFT) | MGBE_MDIO_SCCD_SBUSY; /** @@ -4776,17 +3251,10 @@ static int mgbe_write_phy_reg(struct osi_core_priv_data *osi_core, * On Silicon AXI/APB clock is 408MHz. To achive maximum MDC clock * of 2.5MHz only CR need to be set to 5. */ - if (osi_core->pre_si) { - reg |= (MGBE_MDIO_SCCD_CRS | - ((0x1U & MGBE_MDIO_SCCD_CR_MASK) << - MGBE_MDIO_SCCD_CR_SHIFT)); - } else { - reg &= ~MGBE_MDIO_SCCD_CRS; - reg |= ((0x5U & MGBE_MDIO_SCCD_CR_MASK) << - MGBE_MDIO_SCCD_CR_SHIFT); - } + reg &= ~MGBE_MDIO_SCCD_CRS; + reg |= ((((nveu32_t)0x5U) & MGBE_MDIO_SCCD_CR_MASK) << MGBE_MDIO_SCCD_CR_SHIFT); - osi_writela(osi_core, reg, (unsigned char *) + osi_writela(osi_core, reg, (nveu8_t *) osi_core->base + MGBE_MDIO_SCCD); /* wait for MII write operation to complete */ @@ -4796,10 +3264,9 @@ static int mgbe_write_phy_reg(struct osi_core_priv_data *osi_core, OSI_LOG_ARG_HW_FAIL, "MII operation timed out\n", 0ULL); - return ret; } - - return 0; +fail: + return ret; } /** @@ -4816,13 +3283,13 @@ static int mgbe_write_phy_reg(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_read_phy_reg(struct osi_core_priv_data *osi_core, - unsigned int phyaddr, - unsigned int phyreg) +static nve32_t mgbe_read_phy_reg(struct osi_core_priv_data *const osi_core, + const nveu32_t phyaddr, + const nveu32_t phyreg) { - unsigned int reg; - unsigned int data; - int ret = 0; + nveu32_t reg; + nveu32_t data; + nve32_t ret = 0; ret = mgbe_mdio_busy_wait(osi_core); if (ret < 0) { @@ -4830,7 +3297,7 @@ static int mgbe_read_phy_reg(struct osi_core_priv_data *osi_core, OSI_LOG_ARG_HW_FAIL, "MII operation timed out\n", 0ULL); - return ret; + goto fail; } /* set MDIO address register */ @@ -4840,11 +3307,11 @@ static int mgbe_read_phy_reg(struct osi_core_priv_data *osi_core, /* set port address and register address */ reg |= (phyaddr << MGBE_MDIO_SCCA_PA_SHIFT) | (phyreg & MGBE_MDIO_SCCA_RA_MASK); - osi_writela(osi_core, reg, (unsigned char *) + osi_writela(osi_core, reg, (nveu8_t *) osi_core->base + MGBE_MDIO_SCCA); /* Program Data register */ - reg = (MGBE_MDIO_SCCD_CMD_RD << MGBE_MDIO_SCCD_CMD_SHIFT) | + reg = (((nveu32_t)MGBE_MDIO_SCCD_CMD_RD) << MGBE_MDIO_SCCD_CMD_SHIFT) | MGBE_MDIO_SCCD_SBUSY; /** @@ -4853,369 +3320,31 @@ static int mgbe_read_phy_reg(struct osi_core_priv_data *osi_core, * On Silicon AXI/APB clock is 408MHz. To achive maximum MDC clock * of 2.5MHz only CR need to be set to 5. 
*/ - if (osi_core->pre_si) { - reg |= (MGBE_MDIO_SCCD_CRS | - ((0x1U & MGBE_MDIO_SCCD_CR_MASK) << - MGBE_MDIO_SCCD_CR_SHIFT)); - } else { - reg &= ~MGBE_MDIO_SCCD_CRS; - reg |= ((0x5U & MGBE_MDIO_SCCD_CR_MASK) << - MGBE_MDIO_SCCD_CR_SHIFT); - } - - osi_writela(osi_core, reg, (unsigned char *) - osi_core->base + MGBE_MDIO_SCCD); - - ret = mgbe_mdio_busy_wait(osi_core); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, - OSI_LOG_ARG_HW_FAIL, - "MII operation timed out\n", - 0ULL); - return ret; - } - - reg = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MDIO_SCCD); - - data = (reg & MGBE_MDIO_SCCD_SDATA_MASK); - return (int)data; -} + reg &= ~MGBE_MDIO_SCCD_CRS; + reg |= ((((nveu32_t)0x5U) & MGBE_MDIO_SCCD_CR_MASK) << MGBE_MDIO_SCCD_CR_SHIFT); -/** - * @brief mgbe_hw_est_write - indirect write the GCL to Software own list - * (SWOL) - * - * @param[in] base: MAC base IOVA address. - * @param[in] addr_val: Address offset for indirect write. - * @param[in] data: Data to be written at offset. - * @param[in] gcla: Gate Control List Address, 0 for ETS register. - * 1 for GCL memory. - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int mgbe_hw_est_write(struct osi_core_priv_data *osi_core, - unsigned int addr_val, unsigned int data, - unsigned int gcla) -{ - int retry = 1000; - unsigned int val = 0x0; - - osi_writela(osi_core, data, (unsigned char *)osi_core->base + - MGBE_MTL_EST_DATA); - - val &= ~MGBE_MTL_EST_ADDR_MASK; - val |= (gcla == 1U) ? 
0x0U : MGBE_MTL_EST_GCRR; - val |= MGBE_MTL_EST_SRWO; - val |= addr_val; - osi_writela(osi_core, val, (unsigned char *)osi_core->base + - MGBE_MTL_EST_GCL_CONTROL); - - while (--retry > 0) { - osi_core->osd_ops.udelay(OSI_DELAY_1US); - val = osi_readla(osi_core, (unsigned char *)osi_core->base + - MGBE_MTL_EST_GCL_CONTROL); - if ((val & MGBE_MTL_EST_SRWO) == MGBE_MTL_EST_SRWO) { - continue; - } - - break; - } - - if ((val & MGBE_MTL_EST_ERR0) == MGBE_MTL_EST_ERR0 || - (retry <= 0)) { - return -1; - } - - return 0; -} - -/** - * @brief mgbe_hw_config_est - Read Setting for GCL from input and update - * registers. - * - * Algorithm: - * 1) Write TER, LLR and EST control register - * 2) Update GCL to sw own GCL (MTL_EST_Status bit SWOL will tell which is - * owned by SW) and store which GCL is in use currently in sw. - * 3) TODO set DBGB and DBGM for debugging - * 4) EST_data and GCRR to 1, update entry sno in ADDR and put data at - * est_gcl_data enable GCL MTL_EST_SSWL and wait for self clear or use - * SWLC in MTL_EST_Status. Please note new GCL will be pushed for each entry. - * 5) Configure btr. Update btr based on current time (current time - * should be updated based on PTP by this time) - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] est: EST configuration input argument. - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_hw_config_est(struct osi_core_priv_data *osi_core, - struct osi_est_config *est) -{ - unsigned int btr[2] = {0}; - unsigned int val = 0x0; - void *base = osi_core->base; - unsigned int i; - int ret = 0; - unsigned int addr = 0x0; - - if ((osi_core->hw_feature != OSI_NULL) && - (osi_core->hw_feature->est_sel == OSI_DISABLE)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "EST not supported in HW\n", 0ULL); - return -1; - } - - if (est->en_dis == OSI_DISABLE) { - val = osi_readla(osi_core, (unsigned char *) - base + MGBE_MTL_EST_CONTROL); - val &= ~MGBE_MTL_EST_EEST; - osi_writela(osi_core, val, (unsigned char *) - base + MGBE_MTL_EST_CONTROL); - - return 0; - } - - btr[0] = est->btr[0]; - btr[1] = est->btr[1]; - if (btr[0] == 0U && btr[1] == 0U) { - common_get_systime_from_mac(osi_core->base, - osi_core->mac, - &btr[1], &btr[0]); - } - - if (gcl_validate(osi_core, est, btr, osi_core->mac) < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL validation failed\n", 0LL); - return -1; - } - - ret = mgbe_hw_est_write(osi_core, MGBE_MTL_EST_CTR_LOW, est->ctr[0], 0); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL CTR[0] failed\n", 0LL); - return ret; - } - /* check for est->ctr[i] not more than FF, TODO as per hw config - * parameter we can have max 0x3 as this value in sec */ - est->ctr[1] &= MGBE_MTL_EST_CTR_HIGH_MAX; - ret = mgbe_hw_est_write(osi_core, MGBE_MTL_EST_CTR_HIGH, est->ctr[1], 0); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL CTR[1] failed\n", 0LL); - return ret; - } - - ret = mgbe_hw_est_write(osi_core, MGBE_MTL_EST_TER, est->ter, 0); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL TER failed\n", 0LL); - return ret; - } - - ret = mgbe_hw_est_write(osi_core, MGBE_MTL_EST_LLR, est->llr, 0); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL LLR failed\n", 0LL); - return ret; - } - - /* Write GCL table */ - for (i 
= 0U; i < est->llr; i++) { - addr = i; - addr = addr << MGBE_MTL_EST_ADDR_SHIFT; - addr &= MGBE_MTL_EST_ADDR_MASK; - ret = mgbe_hw_est_write(osi_core, addr, est->gcl[i], 1); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL enties write failed\n", - (unsigned long long)i); - return ret; - } - } - - /* Write parameters */ - ret = mgbe_hw_est_write(osi_core, MGBE_MTL_EST_BTR_LOW, - btr[0] + est->btr_offset[0], OSI_DISABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL BTR[0] failed\n", - (unsigned long long)(btr[0] + - est->btr_offset[0])); - return ret; - } - - ret = mgbe_hw_est_write(osi_core, MGBE_MTL_EST_BTR_HIGH, - btr[1] + est->btr_offset[1], OSI_DISABLE); - if (ret < 0) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "GCL BTR[1] failed\n", - (unsigned long long)(btr[1] + - est->btr_offset[1])); - return ret; - } - - val = osi_readla(osi_core, (unsigned char *) - base + MGBE_MTL_EST_CONTROL); - /* Store table */ - val |= MGBE_MTL_EST_SSWL; - val |= MGBE_MTL_EST_EEST; - val |= MGBE_MTL_EST_QHLBF; - osi_writela(osi_core, val, (unsigned char *) - base + MGBE_MTL_EST_CONTROL); - - return ret; -} - -/** - * @brief mgbe_hw_config_fep - Read Setting for preemption and express for TC - * and update registers. - * - * Algorithm: - * 1) Check for TC enable and TC has masked for setting to preemptable. - * 2) update FPE control status register - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] fpe: FPE configuration input argument. - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_hw_config_fpe(struct osi_core_priv_data *osi_core, - struct osi_fpe_config *fpe) -{ - unsigned int i = 0U; - unsigned int val = 0U; - unsigned int temp = 0U, temp1 = 0U; - unsigned int temp_shift = 0U; - int ret = 0; - - if ((osi_core->hw_feature != OSI_NULL) && - (osi_core->hw_feature->fpe_sel == OSI_DISABLE)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "FPE not supported in HW\n", 0ULL); - return -1; - } - -#ifdef MACSEC_SUPPORT - osi_lock_irq_enabled(&osi_core->macsec_fpe_lock); - /* MACSEC and FPE cannot coexist on MGBE refer bug 3484034 */ - if (osi_core->is_macsec_enabled == OSI_ENABLE) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "FPE and MACSEC cannot co-exist\n", 0ULL); - ret = -1; - goto exit; - } -#endif /* MACSEC_SUPPORT */ - - osi_core->fpe_ready = OSI_DISABLE; - - if (((fpe->tx_queue_preemption_enable << MGBE_MTL_FPE_CTS_PEC_SHIFT) & - MGBE_MTL_FPE_CTS_PEC) == OSI_DISABLE) { - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_MTL_FPE_CTS); - val &= ~MGBE_MTL_FPE_CTS_PEC; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - MGBE_MTL_FPE_CTS); - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_MAC_FPE_CTS); - val &= ~MGBE_MAC_FPE_CTS_EFPE; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - MGBE_MAC_FPE_CTS); - -#ifdef MACSEC_SUPPORT - osi_core->is_fpe_enabled = OSI_DISABLE; -#endif /* MACSEC_SUPPORT */ - ret = 0; - goto exit; - } - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + - MGBE_MTL_FPE_CTS); - val &= ~MGBE_MTL_FPE_CTS_PEC; - for (i = 0U; i < OSI_MAX_TC_NUM; i++) { - /* max 8 bit for this structure fot TC/TXQ. Set the TC for express or - * preemption. Default is express for a TC. 
DWCXG_NUM_TC = 8 */ - temp = OSI_BIT(i); - if ((fpe->tx_queue_preemption_enable & temp) == temp) { - temp_shift = i; - temp_shift += MGBE_MTL_FPE_CTS_PEC_SHIFT; - /* set queue for preemtable */ - if (temp_shift < MGBE_MTL_FPE_CTS_PEC_MAX_SHIFT) { - temp1 = OSI_ENABLE; - temp1 = temp1 << temp_shift; - val |= temp1; - } else { - /* Do nothing */ - } - } - } - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + - MGBE_MTL_FPE_CTS); - - if (fpe->rq == 0x0U || fpe->rq >= OSI_MGBE_MAX_NUM_CHANS) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "FPE init failed due to wrong RQ\n", fpe->rq); - ret = -1; - goto exit; - } - - val = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MAC_RQC1R); - val &= ~MGBE_MAC_RQC1R_RQ; - temp = fpe->rq; - temp = temp << MGBE_MAC_RQC1R_RQ_SHIFT; - temp = (temp & MGBE_MAC_RQC1R_RQ); - val |= temp; - osi_core->residual_queue = fpe->rq; - osi_writela(osi_core, val, (unsigned char *) - osi_core->base + MGBE_MAC_RQC1R); - - val = osi_readla(osi_core, (nveu8_t *)osi_core->base + MGBE_MAC_RQC4R); - val &= ~MGBE_MAC_RQC4R_PMCBCQ; - temp = fpe->rq; - temp = temp << MGBE_MAC_RQC4R_PMCBCQ_SHIFT; - temp = (temp & MGBE_MAC_RQC4R_PMCBCQ); - val |= temp; - osi_writela(osi_core, val, (nveu8_t *)osi_core->base + MGBE_MAC_RQC4R); - - /* initiate SVER for SMD-V and SMD-R */ - val = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MTL_FPE_CTS); - val |= MGBE_MAC_FPE_CTS_SVER; - osi_writela(osi_core, val, (unsigned char *) - osi_core->base + MGBE_MAC_FPE_CTS); - - val = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MTL_FPE_ADV); - val &= ~MGBE_MTL_FPE_ADV_HADV_MASK; - //(minimum_fragment_size +IPG/EIPG + Preamble) *.8 ~98ns for10G - val |= MGBE_MTL_FPE_ADV_HADV_VAL; - osi_writela(osi_core, val, (unsigned char *) - osi_core->base + MGBE_MTL_FPE_ADV); - -#ifdef MACSEC_SUPPORT - osi_core->is_fpe_enabled = OSI_ENABLE; -#endif /* MACSEC_SUPPORT */ + osi_writela(osi_core, reg, (nveu8_t *) + osi_core->base + 
MGBE_MDIO_SCCD); -exit: + ret = mgbe_mdio_busy_wait(osi_core); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, + OSI_LOG_ARG_HW_FAIL, + "MII operation timed out\n", + 0ULL); + goto fail; + } -#ifdef MACSEC_SUPPORT - osi_unlock_irq_enabled(&osi_core->macsec_fpe_lock); -#endif /* MACSEC_SUPPORT */ + reg = osi_readla(osi_core, (nveu8_t *) + osi_core->base + MGBE_MDIO_SCCD); + + data = (reg & MGBE_MDIO_SCCD_SDATA_MASK); + ret = (nve32_t)data; +fail: return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief mgbe_disable_tx_lpi - Helper function to disable Tx LPI. * @@ -5229,14 +3358,14 @@ static int mgbe_hw_config_fpe(struct osi_core_priv_data *osi_core, */ static inline void mgbe_disable_tx_lpi(struct osi_core_priv_data *osi_core) { - unsigned int lpi_csr = 0; + nveu32_t lpi_csr = 0; /* Disable LPI control bits */ - lpi_csr = osi_readla(osi_core, (unsigned char *) + lpi_csr = osi_readla(osi_core, (nveu8_t *) osi_core->base + MGBE_MAC_LPI_CSR); lpi_csr &= ~(MGBE_MAC_LPI_CSR_LPITE | MGBE_MAC_LPI_CSR_LPITXA | MGBE_MAC_LPI_CSR_PLS | MGBE_MAC_LPI_CSR_LPIEN); - osi_writela(osi_core, lpi_csr, (unsigned char *) + osi_writela(osi_core, lpi_csr, (nveu8_t *) osi_core->base + MGBE_MAC_LPI_CSR); } @@ -5259,14 +3388,14 @@ static inline void mgbe_disable_tx_lpi(struct osi_core_priv_data *osi_core) * MAC/PHY should be initialized * */ -static void mgbe_configure_eee(struct osi_core_priv_data *osi_core, - unsigned int tx_lpi_enabled, - unsigned int tx_lpi_timer) +static void mgbe_configure_eee(struct osi_core_priv_data *const osi_core, + const nveu32_t tx_lpi_enabled, + const nveu32_t tx_lpi_timer) { - unsigned int lpi_csr = 0; - unsigned int lpi_timer_ctrl = 0; - unsigned int lpi_entry_timer = 0; - unsigned int tic_counter = 0; + nveu32_t lpi_csr = 0; + nveu32_t lpi_timer_ctrl = 0; + nveu32_t lpi_entry_timer = 0; + nveu32_t tic_counter = 0; void *addr = osi_core->base; if (xpcs_eee(osi_core, tx_lpi_enabled) != 0) { @@ -5293,7 +3422,7 @@ static void mgbe_configure_eee(struct 
osi_core_priv_data *osi_core, MGBE_LPI_LS_TIMER_MASK); lpi_timer_ctrl |= (MGBE_DEFAULT_LPI_TW_TIMER & MGBE_LPI_TW_TIMER_MASK); - osi_writela(osi_core, lpi_timer_ctrl, (unsigned char *)addr + + osi_writela(osi_core, lpi_timer_ctrl, (nveu8_t *)addr + MGBE_MAC_LPI_TIMER_CTRL); /* 4. For GMII, read the link status of the PHY chip by @@ -5308,7 +3437,7 @@ static void mgbe_configure_eee(struct osi_core_priv_data *osi_core, /* Should be same as (ABP clock freq - 1) = 12 = 0xC, currently * from define but we should get it from pdata->clock TODO */ tic_counter = MGBE_1US_TIC_COUNTER; - osi_writela(osi_core, tic_counter, (unsigned char *)addr + + osi_writela(osi_core, tic_counter, (nveu8_t *)addr + MGBE_MAC_1US_TIC_COUNT); /* 6. Program the MAC_LPI_Auto_Entry_Timer register (LPIET) @@ -5318,7 +3447,7 @@ static void mgbe_configure_eee(struct osi_core_priv_data *osi_core, * to enter LPI mode after all tx is complete. Default 1sec */ lpi_entry_timer |= (tx_lpi_timer & MGBE_LPI_ENTRY_TIMER_MASK); - osi_writela(osi_core, lpi_entry_timer, (unsigned char *)addr + + osi_writela(osi_core, lpi_entry_timer, (nveu8_t *)addr + MGBE_MAC_LPI_EN_TIMER); /* 7. Set LPIATE and LPITXA (bit[20:19]) of @@ -5329,27 +3458,28 @@ static void mgbe_configure_eee(struct osi_core_priv_data *osi_core, * enters the LPI mode after completing all scheduled * packets and remain IDLE for the time indicated by LPIET. 
*/ - lpi_csr = osi_readla(osi_core, (unsigned char *) + lpi_csr = osi_readla(osi_core, (nveu8_t *) addr + MGBE_MAC_LPI_CSR); lpi_csr |= (MGBE_MAC_LPI_CSR_LPITE | MGBE_MAC_LPI_CSR_LPITXA | MGBE_MAC_LPI_CSR_PLS | MGBE_MAC_LPI_CSR_LPIEN); - osi_writela(osi_core, lpi_csr, (unsigned char *) + osi_writela(osi_core, lpi_csr, (nveu8_t *) addr + MGBE_MAC_LPI_CSR); } else { /* Disable LPI control bits */ mgbe_disable_tx_lpi(osi_core); } } +#endif /* !OSI_STRIPPED_LIB */ -static int mgbe_get_hw_features(struct osi_core_priv_data *osi_core, - struct osi_hw_features *hw_feat) +static nve32_t mgbe_get_hw_features(struct osi_core_priv_data *const osi_core, + struct osi_hw_features *hw_feat) { - unsigned char *base = (unsigned char *)osi_core->base; - unsigned int mac_hfr0 = 0; - unsigned int mac_hfr1 = 0; - unsigned int mac_hfr2 = 0; - unsigned int mac_hfr3 = 0; - unsigned int val = 0; + nveu8_t *base = (nveu8_t *)osi_core->base; + nveu32_t mac_hfr0 = 0; + nveu32_t mac_hfr1 = 0; + nveu32_t mac_hfr2 = 0; + nveu32_t mac_hfr3 = 0; + nveu32_t val = 0; mac_hfr0 = osi_readla(osi_core, base + MGBE_MAC_HFR0); mac_hfr1 = osi_readla(osi_core, base + MGBE_MAC_HFR1); @@ -5507,179 +3637,13 @@ static int mgbe_get_hw_features(struct osi_core_priv_data *osi_core, return 0; } -/** - * @brief mgbe_poll_for_tsinit_complete - Poll for time stamp init complete - * - * Algorithm: Read TSINIT value from MAC TCR register until it is - * equal to zero. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] mac_tcr: Address to store time stamp control register read value - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static inline int mgbe_poll_for_tsinit_complete( - struct osi_core_priv_data *osi_core, - unsigned int *mac_tcr) -{ - unsigned int retry = 0U; - - while (retry < OSI_POLL_COUNT) { - /* Read and Check TSINIT in MAC_Timestamp_Control register */ - *mac_tcr = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MAC_TCR); - if ((*mac_tcr & MGBE_MAC_TCR_TSINIT) == 0U) { - return 0; - } - - retry++; - osi_core->osd_ops.udelay(OSI_DELAY_1000US); - } - - return -1; -} - -/** - * @brief mgbe_set_systime - Set system time - * - * Algorithm: Updates system time (seconds and nano seconds) - * in hardware registers - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] sec: Seconds to be configured - * @param[in] nsec: Nano Seconds to be configured - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. - */ -static int mgbe_set_systime_to_mac(struct osi_core_priv_data *osi_core, - unsigned int sec, - unsigned int nsec) -{ - unsigned int mac_tcr; - void *addr = osi_core->base; - int ret; - - /* To be sure previous write was flushed (if Any) */ - ret = mgbe_poll_for_tsinit_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - - /* write seconds value to MAC_System_Time_Seconds_Update register */ - osi_writela(osi_core, sec, (unsigned char *)addr + MGBE_MAC_STSUR); - - /* write nano seconds value to MAC_System_Time_Nanoseconds_Update - * register - */ - osi_writela(osi_core, nsec, (unsigned char *)addr + MGBE_MAC_STNSUR); - - /* issue command to update the configured secs and nsecs values */ - mac_tcr |= MGBE_MAC_TCR_TSINIT; - osi_writela(osi_core, mac_tcr, (unsigned char *)addr + MGBE_MAC_TCR); - - ret = mgbe_poll_for_tsinit_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - - return 0; -} - -/** - * @brief mgbe_poll_for_addend_complete - Poll for addend value write complete - * - * Algorithm: Read TSADDREG 
value from MAC TCR register until it is - * equal to zero. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] mac_tcr: Address to store time stamp control register read value - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. - */ -static inline int mgbe_poll_for_addend_complete( - struct osi_core_priv_data *osi_core, - unsigned int *mac_tcr) -{ - unsigned int retry = 0U; - - /* Poll */ - while (retry < OSI_POLL_COUNT) { - /* Read and Check TSADDREG in MAC_Timestamp_Control register */ - *mac_tcr = osi_readla(osi_core, (unsigned char *) - osi_core->base + MGBE_MAC_TCR); - if ((*mac_tcr & MGBE_MAC_TCR_TSADDREG) == 0U) { - return 0; - } - - retry++; - osi_core->osd_ops.udelay(OSI_DELAY_1000US); - } - - return -1; -} - -/** - * @brief mgbe_config_addend - Configure addend - * - * Algorithm: Updates the Addend value in HW register - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] addend: Addend value to be configured - * - * @note MAC should be init and started. see osi_start_mac() - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static int mgbe_config_addend(struct osi_core_priv_data *osi_core, - unsigned int addend) -{ - unsigned int mac_tcr; - void *addr = osi_core->base; - int ret; - - /* To be sure previous write was flushed (if Any) */ - ret = mgbe_poll_for_addend_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - - /* write addend value to MAC_Timestamp_Addend register */ - osi_writela(osi_core, addend, (unsigned char *)addr + MGBE_MAC_TAR); - - /* issue command to update the configured addend value */ - mac_tcr |= MGBE_MAC_TCR_TSADDREG; - osi_writela(osi_core, mac_tcr, (unsigned char *)addr + MGBE_MAC_TCR); - - ret = mgbe_poll_for_addend_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - - return 0; -} - /** * @brief mgbe_poll_for_update_ts_complete - Poll for update time stamp * * Algorithm: Read time stamp update value from TCR register until it is * equal to zero. * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. + * @param[in] osi_core: OSI core private data structure. * @param[in] mac_tcr: Address to store time stamp control register read value * * @note MAC should be init and started. see osi_start_mac() @@ -5687,25 +3651,27 @@ static int mgbe_config_addend(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static inline int mgbe_poll_for_update_ts_complete( +static inline nve32_t mgbe_poll_for_update_ts_complete( struct osi_core_priv_data *osi_core, - unsigned int *mac_tcr) + nveu32_t *mac_tcr) { - unsigned int retry = 0U; + nveu32_t retry = 0U; + nve32_t ret = -1; while (retry < OSI_POLL_COUNT) { /* Read and Check TSUPDT in MAC_Timestamp_Control register */ - *mac_tcr = osi_readla(osi_core, (unsigned char *) + *mac_tcr = osi_readla(osi_core, (nveu8_t *) osi_core->base + MGBE_MAC_TCR); if ((*mac_tcr & MGBE_MAC_TCR_TSUPDT) == 0U) { - return 0; + ret = 0; + break; } retry++; osi_core->osd_ops.udelay(OSI_DELAY_1000US); } - return -1; + return ret; } /** @@ -5713,8 +3679,7 @@ static inline int mgbe_poll_for_update_ts_complete( * * Algorithm: Update MAC time with system time * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. + * @param[in] osi_core: OSI core private data structure. * @param[in] sec: Seconds to be configured * @param[in] nsec: Nano seconds to be configured * @param[in] add_sub: To decide on add/sub with system time @@ -5726,21 +3691,25 @@ static inline int mgbe_poll_for_update_ts_complete( * @retval 0 on success * @retval -1 on failure. 
*/ -static int mgbe_adjust_mactime(struct osi_core_priv_data *osi_core, - unsigned int sec, unsigned int nsec, - unsigned int add_sub, - unsigned int one_nsec_accuracy) +static nve32_t mgbe_adjust_mactime(struct osi_core_priv_data *const osi_core, + const nveu32_t sec, const nveu32_t nsec, + const nveu32_t add_sub, + const nveu32_t one_nsec_accuracy) { void *addr = osi_core->base; - unsigned int mac_tcr; - unsigned int value = 0; - unsigned long long temp = 0; - int ret; + nveu32_t mac_tcr; + nveu32_t value = 0; + nveul64_t temp = 0; + nveu32_t temp_sec; + nveu32_t temp_nsec; + nve32_t ret = 0; + temp_sec = sec; + temp_nsec = nsec; /* To be sure previous write was flushed (if Any) */ ret = mgbe_poll_for_update_ts_complete(osi_core, &mac_tcr); if (ret == -1) { - return -1; + goto fail; } if (add_sub != 0U) { @@ -5748,9 +3717,9 @@ static int mgbe_adjust_mactime(struct osi_core_priv_data *osi_core, * the system time, then MAC_STSUR reg should be * programmed with (2^32 – ) */ - temp = (TWO_POWER_32 - sec); + temp = (TWO_POWER_32 - temp_sec); if (temp < UINT_MAX) { - sec = (unsigned int)temp; + temp_sec = (nveu32_t)temp; } else { /* do nothing here */ } @@ -5762,192 +3731,35 @@ static int mgbe_adjust_mactime(struct osi_core_priv_data *osi_core, * (2^32 - if MAC_TCR.TSCTRLSSR is reset) */ if (one_nsec_accuracy == OSI_ENABLE) { - if (nsec < UINT_MAX) { - nsec = (TEN_POWER_9 - nsec); + if (temp_nsec < UINT_MAX) { + temp_nsec = (TEN_POWER_9 - temp_nsec); } } else { - if (nsec < UINT_MAX) { - nsec = (TWO_POWER_31 - nsec); + if (temp_nsec < UINT_MAX) { + temp_nsec = (TWO_POWER_31 - temp_nsec); } } } /* write seconds value to MAC_System_Time_Seconds_Update register */ - osi_writela(osi_core, sec, (unsigned char *)addr + MGBE_MAC_STSUR); + osi_writela(osi_core, temp_sec, (nveu8_t *)addr + MGBE_MAC_STSUR); /* write nano seconds value and add_sub to * MAC_System_Time_Nanoseconds_Update register */ - value |= nsec; + value |= temp_nsec; value |= (add_sub << 
MGBE_MAC_STNSUR_ADDSUB_SHIFT); - osi_writela(osi_core, value, (unsigned char *)addr + MGBE_MAC_STNSUR); + osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_STNSUR); /* issue command to initialize system time with the value * specified in MAC_STSUR and MAC_STNSUR */ mac_tcr |= MGBE_MAC_TCR_TSUPDT; - osi_writela(osi_core, mac_tcr, (unsigned char *)addr + MGBE_MAC_TCR); + osi_writela(osi_core, mac_tcr, (nveu8_t *)addr + MGBE_MAC_TCR); ret = mgbe_poll_for_update_ts_complete(osi_core, &mac_tcr); - if (ret == -1) { - return -1; - } - - return 0; -} - -/** - * @brief mgbe_config_tscr - Configure Time Stamp Register - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] ptp_filter: PTP rx filter parameters - * - * @note MAC should be init and started. see osi_start_mac() - */ -static void mgbe_config_tscr(struct osi_core_priv_data *osi_core, - unsigned int ptp_filter) -{ - struct core_local *l_core = (struct core_local *)osi_core; - unsigned int mac_tcr = 0; - nveu32_t value = 0x0U; - void *addr = osi_core->base; - - if (ptp_filter != OSI_DISABLE) { - mac_tcr = (OSI_MAC_TCR_TSENA | - OSI_MAC_TCR_TSCFUPDT | - OSI_MAC_TCR_TSCTRLSSR); - - if ((ptp_filter & OSI_MAC_TCR_SNAPTYPSEL_1) == - OSI_MAC_TCR_SNAPTYPSEL_1) { - mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_1; - } - if ((ptp_filter & OSI_MAC_TCR_SNAPTYPSEL_2) == - OSI_MAC_TCR_SNAPTYPSEL_2) { - mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_2; - } - if ((ptp_filter & OSI_MAC_TCR_SNAPTYPSEL_3) == - OSI_MAC_TCR_SNAPTYPSEL_3) { - mac_tcr |= OSI_MAC_TCR_SNAPTYPSEL_3; - } - if ((ptp_filter & OSI_MAC_TCR_TSIPV4ENA) == - OSI_MAC_TCR_TSIPV4ENA) { - mac_tcr |= OSI_MAC_TCR_TSIPV4ENA; - } - if ((ptp_filter & OSI_MAC_TCR_TSIPV6ENA) == - OSI_MAC_TCR_TSIPV6ENA) { - mac_tcr |= OSI_MAC_TCR_TSIPV6ENA; - } - if ((ptp_filter & OSI_MAC_TCR_TSEVENTENA) == - OSI_MAC_TCR_TSEVENTENA) { - mac_tcr |= OSI_MAC_TCR_TSEVENTENA; - } - if ((ptp_filter & OSI_MAC_TCR_TSMASTERENA) == - OSI_MAC_TCR_TSMASTERENA) { - 
mac_tcr |= OSI_MAC_TCR_TSMASTERENA; - } - if ((ptp_filter & OSI_MAC_TCR_TSVER2ENA) == - OSI_MAC_TCR_TSVER2ENA) { - mac_tcr |= OSI_MAC_TCR_TSVER2ENA; - } - if ((ptp_filter & OSI_MAC_TCR_TSIPENA) == - OSI_MAC_TCR_TSIPENA) { - mac_tcr |= OSI_MAC_TCR_TSIPENA; - } - if ((ptp_filter & OSI_MAC_TCR_AV8021ASMEN) == - OSI_MAC_TCR_AV8021ASMEN) { - mac_tcr |= OSI_MAC_TCR_AV8021ASMEN; - } - if ((ptp_filter & OSI_MAC_TCR_TSENALL) == - OSI_MAC_TCR_TSENALL) { - mac_tcr |= OSI_MAC_TCR_TSENALL; - } - if ((ptp_filter & OSI_MAC_TCR_CSC) == - OSI_MAC_TCR_CSC) { - mac_tcr |= OSI_MAC_TCR_CSC; - } - } else { - /* Disabling the MAC time stamping */ - mac_tcr = OSI_DISABLE; - } - - osi_writela(osi_core, mac_tcr, (unsigned char *)addr + MGBE_MAC_TCR); - - value = osi_readla(osi_core, (nveu8_t *)addr + MGBE_MAC_PPS_CTL); - value &= ~MGBE_MAC_PPS_CTL_PPSCTRL0; - if (l_core->pps_freq == OSI_ENABLE) { - value |= OSI_ENABLE; - } - osi_writela(osi_core, value, (nveu8_t *)addr + MGBE_MAC_PPS_CTL); -} - -/** - * @brief mgbe_config_ssir - Configure SSIR - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] ptp_clock: PTP required clock frequency - * - * @note MAC should be init and started. see osi_start_mac() - */ -static void mgbe_config_ssir(struct osi_core_priv_data *const osi_core, - const unsigned int ptp_clock) -{ - unsigned long long val; - unsigned int mac_tcr; - void *addr = osi_core->base; - - mac_tcr = osi_readla(osi_core, (unsigned char *)addr + MGBE_MAC_TCR); - - /* convert the PTP required clock frequency to nano second. 
- * formula is : ((1/ptp_clock) * 1000000000) - * where, ptp_clock = OSI_PTP_REQ_CLK_FREQ if FINE correction - * and ptp_clock = PTP reference clock if COARSE correction - */ - if ((mac_tcr & MGBE_MAC_TCR_TSCFUPDT) == MGBE_MAC_TCR_TSCFUPDT) { - if (osi_core->pre_si == OSI_ENABLE) { - val = OSI_PTP_SSINC_16; - } else { - /* For silicon */ - val = OSI_PTP_SSINC_4; - } - } else { - val = ((1U * OSI_NSEC_PER_SEC) / ptp_clock); - } - - /* 0.465ns accurecy */ - if ((mac_tcr & MGBE_MAC_TCR_TSCTRLSSR) == 0U) { - if (val < UINT_MAX) { - val = (val * 1000U) / 465U; - } - } - - val |= (val << MGBE_MAC_SSIR_SSINC_SHIFT); - - /* update Sub-second Increment Value */ - if (val < UINT_MAX) { - osi_writela(osi_core, (unsigned int)val, - (unsigned char *)addr + MGBE_MAC_SSIR); - } -} - -/** - * @brief mgbe_set_mode - Setting the mode. - * - * @param[in] osi_core: OSI core private data structure. - * @param[in] mode: mode to be set. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: Yes - * @retval 0 - */ -static nve32_t mgbe_set_mode(OSI_UNUSED - struct osi_core_priv_data *const osi_core, - OSI_UNUSED const nve32_t mode) -{ - return 0; +fail: + return ret; } /** @@ -5964,7 +3776,7 @@ static nve32_t mgbe_set_mode(OSI_UNUSED * @retval 0 */ static nveu32_t mgbe_read_reg(struct osi_core_priv_data *const osi_core, - const nve32_t reg) + const nve32_t reg) { return osi_readla(osi_core, (nveu8_t *)osi_core->base + reg); } @@ -6033,25 +3845,7 @@ static nveu32_t mgbe_write_macsec_reg(struct osi_core_priv_data *const osi_core, } #endif /* MACSEC_SUPPORT */ -/** - * @brief mgbe_validate_core_regs - Validates MGBE core registers. - * - * @param[in] osi_core: OSI core private data structure. 
- * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: Yes - * @retval 0 - */ -static nve32_t mgbe_validate_core_regs( - OSI_UNUSED - struct osi_core_priv_data *const osi_core) -{ - return 0; -} - +#ifndef OSI_STRIPPED_LIB /** * @brief eqos_write_reg - Write a reg * @@ -6114,8 +3908,9 @@ static void mgbe_set_mdc_clk_rate(OSI_UNUSED const nveu64_t csr_clk_rate) { } +#endif /* !OSI_STRIPPED_LIB */ -#ifdef MACSEC_SUPPORT +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) /** * @brief mgbe_config_for_macsec - Configure MAC according to macsec IAS * @@ -6145,9 +3940,9 @@ static void mgbe_config_for_macsec(struct osi_core_priv_data *const osi_core, nveu32_t value = 0U, temp = 0U; if ((enable != OSI_ENABLE) && (enable != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Failed to config MGBE per MACSEC\n", 0ULL); - return; + goto done; } /* stop MAC Tx */ mgbe_config_mac_tx(osi_core, OSI_DISABLE); @@ -6209,6 +4004,8 @@ static void mgbe_config_for_macsec(struct osi_core_priv_data *const osi_core, 0ULL); } } +done: + return; } #endif /* MACSEC_SUPPORT */ @@ -6217,68 +4014,46 @@ static void mgbe_config_for_macsec(struct osi_core_priv_data *const osi_core, */ void mgbe_init_core_ops(struct core_ops *ops) { - ops->poll_for_swr = mgbe_poll_for_swr; ops->core_init = mgbe_core_init; - ops->core_deinit = mgbe_core_deinit; - ops->validate_regs = mgbe_validate_core_regs; - ops->start_mac = mgbe_start_mac; - ops->stop_mac = mgbe_stop_mac; ops->handle_common_intr = mgbe_handle_common_intr; - /* only MGBE supports full duplex */ - ops->set_mode = mgbe_set_mode; - /* by default speed is 10G */ - ops->set_speed = mgbe_set_speed; ops->pad_calibrate = mgbe_pad_calibrate; - ops->set_mdc_clk_rate = mgbe_set_mdc_clk_rate; - ops->flush_mtl_tx_queue = mgbe_flush_mtl_tx_queue; - ops->config_mac_loopback = mgbe_config_mac_loopback; - ops->set_avb_algorithm = mgbe_set_avb_algorithm; 
- ops->get_avb_algorithm = mgbe_get_avb_algorithm, - ops->config_fw_err_pkts = mgbe_config_fw_err_pkts; - ops->config_tx_status = mgbe_config_tx_status; - ops->config_rx_crc_check = mgbe_config_rx_crc_check; - ops->config_flow_control = mgbe_config_flow_control; - ops->config_arp_offload = mgbe_config_arp_offload; - ops->config_ptp_offload = mgbe_config_ptp_offload; - ops->config_rxcsum_offload = mgbe_config_rxcsum_offload; - ops->config_mac_pkt_filter_reg = mgbe_config_mac_pkt_filter_reg; ops->update_mac_addr_low_high_reg = mgbe_update_mac_addr_low_high_reg; - ops->config_l3_l4_filter_enable = mgbe_config_l3_l4_filter_enable; - ops->config_l3_filters = mgbe_config_l3_filters; - ops->update_ip4_addr = mgbe_update_ip4_addr; - ops->update_ip6_addr = mgbe_update_ip6_addr; - ops->config_l4_filters = mgbe_config_l4_filters; - ops->update_l4_port_no = mgbe_update_l4_port_no; - ops->config_vlan_filtering = mgbe_config_vlan_filtering; - ops->set_systime_to_mac = mgbe_set_systime_to_mac; - ops->config_addend = mgbe_config_addend; ops->adjust_mactime = mgbe_adjust_mactime; - ops->config_tscr = mgbe_config_tscr; - ops->config_ssir = mgbe_config_ssir, - ops->config_ptp_rxq = mgbe_config_ptp_rxq; + ops->read_mmc = mgbe_read_mmc; ops->write_phy_reg = mgbe_write_phy_reg; ops->read_phy_reg = mgbe_read_phy_reg; - ops->save_registers = mgbe_save_registers; - ops->restore_registers = mgbe_restore_registers; - ops->read_mmc = mgbe_read_mmc; - ops->reset_mmc = mgbe_reset_mmc; - ops->configure_eee = mgbe_configure_eee; ops->get_hw_features = mgbe_get_hw_features; - ops->config_rss = mgbe_config_rss; - ops->hw_config_est = mgbe_hw_config_est; - ops->hw_config_fpe = mgbe_hw_config_fpe; + ops->read_reg = mgbe_read_reg; + ops->write_reg = mgbe_write_reg; + ops->set_avb_algorithm = mgbe_set_avb_algorithm; + ops->get_avb_algorithm = mgbe_get_avb_algorithm; ops->config_frp = mgbe_config_frp; ops->update_frp_entry = mgbe_update_frp_entry; ops->update_frp_nve = mgbe_update_frp_nve; - 
ops->ptp_tsc_capture = mgbe_ptp_tsc_capture; - ops->write_reg = mgbe_write_reg; - ops->read_reg = mgbe_read_reg; #ifdef MACSEC_SUPPORT - ops->write_macsec_reg = mgbe_write_macsec_reg; ops->read_macsec_reg = mgbe_read_macsec_reg; + ops->write_macsec_reg = mgbe_write_macsec_reg; +#ifndef OSI_STRIPPED_LIB ops->macsec_config_mac = mgbe_config_for_macsec; +#endif /* !OSI_STRIPPED_LIB */ #endif /* MACSEC_SUPPORT */ + ops->config_l3l4_filters = mgbe_config_l3l4_filters; +#ifndef OSI_STRIPPED_LIB + ops->config_tx_status = mgbe_config_tx_status; + ops->config_rx_crc_check = mgbe_config_rx_crc_check; + ops->config_flow_control = mgbe_config_flow_control; + ops->config_arp_offload = mgbe_config_arp_offload; + ops->config_ptp_offload = mgbe_config_ptp_offload; + ops->config_vlan_filtering = mgbe_config_vlan_filtering; + ops->reset_mmc = mgbe_reset_mmc; + ops->configure_eee = mgbe_configure_eee; + ops->set_mdc_clk_rate = mgbe_set_mdc_clk_rate; + ops->config_mac_loopback = mgbe_config_mac_loopback; + ops->config_rss = mgbe_config_rss; + ops->config_ptp_rxq = mgbe_config_ptp_rxq; +#endif /* !OSI_STRIPPED_LIB */ #ifdef HSI_SUPPORT ops->core_hsi_configure = mgbe_hsi_configure; + ops->core_hsi_inject_err = mgbe_hsi_inject_err; #endif }; diff --git a/kernel/nvethernetrm/osi/core/mgbe_core.h b/kernel/nvethernetrm/osi/core/mgbe_core.h index 8fc33680a6..691432bb15 100644 --- a/kernel/nvethernetrm/osi/core/mgbe_core.h +++ b/kernel/nvethernetrm/osi/core/mgbe_core.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,6 +23,80 @@ #ifndef MGBE_CORE_H_ #define MGBE_CORE_H_ +#ifndef OSI_STRIPPED_LIB +#define MGBE_MAC_PFR 0x0008 +#define MGBE_MAC_RX_FLW_CTRL 0x0090 +#define MGBE_MAC_RQC2R 0x00A8 +#define MGBE_MAC_QX_TX_FLW_CTRL(x) ((0x0004U * (x)) + 0x0070U) +#define MGBE_MAC_ARPPA 0x0C10 +#define MGBE_MAC_LPI_CSR 0x00D0 +#define MGBE_MAC_LPI_TIMER_CTRL 0x00D4 +#define MGBE_MAC_LPI_EN_TIMER 0x00D8 +#define MGBE_MAC_RSS_CTRL 0x0C80 +#define MGBE_MAC_RSS_ADDR 0x0C88 +#define MGBE_MAC_RSS_DATA 0x0C8C +#define MGBE_MAC_STSR 0x0D08 +#define MGBE_MAC_STNSR 0x0D0C +#define MGBE_MAC_PTO_CR 0x0DC0 +#define MGBE_MAC_PIDR0 0x0DC4 +#define MGBE_MAC_PIDR1 0x0DC8 +#define MGBE_MAC_PIDR2 0x0DCC +#define MGBE_MAC_PMTCSR 0x00C0 +#define MGBE_MAC_HTR_REG(x) ((0x0004U * (x)) + 0x0010U) +#define MGBE_WRAP_AXI_ASID0_CTRL 0x8400 +#define MGBE_WRAP_AXI_ASID1_CTRL 0x8404 +#define MGBE_WRAP_AXI_ASID2_CTRL 0x8408 +#define MGBE_MAC_PFR_VTFE OSI_BIT(16) +#define MGBE_MAC_PFR_IPFE OSI_BIT(20) +#define MGBE_MAC_PFR_IPFE_SHIFT 20 +#define MGBE_SID_VAL1(x) (((x) << 24U) |\ + ((x) << 16U) |\ + ((x) << 8U) |\ + (x)) +#define MGBE_SID_VAL2(x) (((x) << 8U) |\ + (x)) +#define MGBE0_SID ((nveu32_t)0x6U) +#define MGBE1_SID ((nveu32_t)0x49U) +#define MGBE2_SID ((nveu32_t)0x4AU) +#define MGBE3_SID ((nveu32_t)0x4BU) +#define MGBE_MAC_PAUSE_TIME 0xFFFF0000U +#define MGBE_MAC_PAUSE_TIME_MASK 0xFFFF0000U +#define MGBE_MAC_VLAN_TR_VTHM OSI_BIT(25) +#define MGBE_MAC_VLAN_TR_VTIM OSI_BIT(17) +#define MGBE_MAC_VLAN_TR_VTIM_SHIFT 17 +/** + * @addtogroup MGBE MAC hash table defines + * + * @brief MGBE MAC hash table Control register + * filed type defines. 
+ * @{ + */ +#define MGBE_MAX_HTR_REGS 4U +/** @} */ + +#define MGBE_MAX_VLAN_FILTER 32U +#define MGBE_MAC_RX_FLW_CTRL_RFE OSI_BIT(0) +#define MGBE_MAC_STNSR_TSSS_MASK 0x7FFFFFFFU +#define MGBE_MAC_TCR_SNAPTYPSEL_SHIFT 16U +#define MGBE_MAC_TCR_TSENMACADDR OSI_BIT(18) +#define MGBE_MAC_TMCR_IPG_MASK 0x700U +#define MGBE_MAC_RQC1R_PTPQ_SHIFT 24U +#define MGBE_MAC_RQC1R_PTPQ (OSI_BIT(27) | OSI_BIT(26) | \ + OSI_BIT(25) | OSI_BIT(24)) +#define MGBE_MAC_RMCR_LM OSI_BIT(10) +#define MGBE_MAC_RMCR_ARPEN OSI_BIT(31) +#define MGBE_MAC_QX_TX_FLW_CTRL_TFE OSI_BIT(1) +#define MGBE_MAC_TMCR_IFP OSI_BIT(11) +#define MGBE_MAC_RQC1R_TPQC0 OSI_BIT(21) +#define MGBE_MAC_RQC1R_OMCBCQ OSI_BIT(20) +#define MGBE_MAC_RSS_CTRL_RSSE OSI_BIT(0) +#define MGBE_MAC_RSS_CTRL_IP2TE OSI_BIT(1) +#define MGBE_MAC_RSS_CTRL_TCP4TE OSI_BIT(2) +#define MGBE_MAC_RSS_CTRL_UDP4TE OSI_BIT(3) +#define MGBE_MAC_RSS_ADDR_ADDRT OSI_BIT(2) +#define MGBE_MAC_RSS_ADDR_RSSIA_SHIFT 8U +#define MGBE_MAC_RSS_ADDR_OB OSI_BIT(0) +#define MGBE_MAC_RSS_ADDR_CT OSI_BIT(1) /** * @addtogroup - MGBE-LPI LPI configuration macros * @@ -33,26 +107,220 @@ * PHY should be up before the LPI pattern can be transmitted to the PHY. * Default 1sec. */ -#define MGBE_DEFAULT_LPI_LS_TIMER (unsigned int)1000 -#define MGBE_LPI_LS_TIMER_MASK 0x3FFU -#define MGBE_LPI_LS_TIMER_SHIFT 16U +#define MGBE_DEFAULT_LPI_LS_TIMER ((nveu32_t)1000) +#define MGBE_LPI_LS_TIMER_MASK 0x3FFU +#define MGBE_LPI_LS_TIMER_SHIFT 16U /* LPI TW timer - minimum time (in microseconds) for which MAC wait after it * stops transmitting LPI pattern before resuming normal tx. * Default 21us */ -#define MGBE_DEFAULT_LPI_TW_TIMER 0x15U -#define MGBE_LPI_TW_TIMER_MASK 0xFFFFU +#define MGBE_DEFAULT_LPI_TW_TIMER 0x15U +#define MGBE_LPI_TW_TIMER_MASK 0xFFFFU /* LPI entry timer - Time in microseconds that MAC will wait to enter LPI mode * after all tx is complete. * Default 1sec. 
*/ -#define MGBE_LPI_ENTRY_TIMER_MASK 0xFFFF8U +#define MGBE_LPI_ENTRY_TIMER_MASK 0xFFFF8U /* 1US TIC counter - This counter should be programmed with the number of clock * cycles of CSR clock that constitutes a period of 1us. * it should be APB clock in MHZ i.e 480-1 for silicon and 13MHZ-1 for uFPGA */ -#define MGBE_1US_TIC_COUNTER 0x1DF +#define MGBE_1US_TIC_COUNTER 0x1DF +#define MGBE_MAC_1US_TIC_COUNT 0x00DC +/** @} */ +#define MGBE_MAC_PTO_CR_PTOEN OSI_BIT(0) +#define MGBE_MAC_PTO_CR_ASYNCEN OSI_BIT(1) +#define MGBE_MAC_PTO_CR_APDREQEN OSI_BIT(2) +#define MGBE_MAC_PTO_CR_DN (OSI_BIT(15) | OSI_BIT(14) | \ + OSI_BIT(13) | OSI_BIT(12) | \ + OSI_BIT(11) | OSI_BIT(10) | \ + OSI_BIT(9) | OSI_BIT(8)) +#define MGBE_MAC_PTO_CR_DN_SHIFT 8U +#define MGBE_DMA_CHX_STATUS_RPS OSI_BIT(8) +#define MGBE_DMA_CHX_STATUS_TPS OSI_BIT(1) +#define MGBE_DMA_CHX_STATUS_TBU OSI_BIT(2) +#define MGBE_DMA_CHX_STATUS_RBU OSI_BIT(7) +#define MGBE_DMA_CHX_STATUS_FBE OSI_BIT(12) +#define MGBE_MAC_LPI_CSR_LPITE OSI_BIT(20) +#define MGBE_MAC_LPI_CSR_LPITXA OSI_BIT(19) +#define MGBE_MAC_LPI_CSR_PLS OSI_BIT(17) +#define MGBE_MAC_LPI_CSR_LPIEN OSI_BIT(16) +#define MGBE_MAC_PFR_VTFE_SHIFT 16 +#define MGBE_MAC_PIDR_PID_MASK 0XFFFFU + +#define MGBE_MTL_RXP_BYPASS_CNT 2U +#define MGBE_MAC_FPE_CTS_SVER OSI_BIT(1) + +#endif /* !OSI_STRIPPED_LIB */ + +#define MGBE_MAC_RX_TX_STS 0x00B8 +#define MGBE_MTL_EST_CONTROL 0x1050 +#define MGBE_MTL_EST_OVERHEAD 0x1054 +#define MGBE_MTL_EST_STATUS 0x1058 +#define MGBE_MTL_EST_SCH_ERR 0x1060 +#define MGBE_MTL_EST_FRMS_ERR 0x1064 +#define MGBE_MTL_EST_ITRE 0x1070 +#define MGBE_MTL_EST_GCL_CONTROL 0x1080 +#define MGBE_MTL_EST_DATA 0x1084 +#define MGBE_MAC_RQC4R 0x0094 +#define MGBE_MAC_FPE_CTS 0x0280 +#define MGBE_MTL_RXP_CS 0x10A0 +#define MGBE_MTL_RXP_INTR_CS 0x10A4 +#define MGBE_MTL_RXP_IND_CS 0x10B0 +#define MGBE_MTL_RXP_IND_DATA 0x10B4 + +#define MGBE_MAC_TX_PCE OSI_BIT(13) +#define MGBE_MAC_TX_IHE OSI_BIT(12) +#define MGBE_MAC_TX_TJT OSI_BIT(0) +#define 
MGBE_MTL_TCQ_ETS_HCR(x) ((0x0080U * (x)) + 0x1120U) +#define MGBE_MTL_TCQ_ETS_LCR(x) ((0x0080U * (x)) + 0x1124U) +#define MGBE_MTL_TCQ_ETS_SSCR(x) ((0x0080U * (x)) + 0x111CU) +#define MGBE_MTL_OP_MODE 0x1000 +#define MGBE_MTL_INTR_STATUS 0x1020 +#define MGBE_MTL_FPE_CTS 0x1090 +#define MGBE_MTL_FPE_ADV 0x1094 + +#define MGBE_MTL_QINT_STATUS(x) ((0x0080U * (x)) + 0x1174U) +#define MGBE_MTL_TCQ_ETS_CR_AVALG_SHIFT 0U +#define MGBE_MTL_QINT_TXUNIFS OSI_BIT(0) +#define MGBE_MTL_TX_OP_MODE_Q2TCMAP (OSI_BIT(10) | OSI_BIT(9) |\ + OSI_BIT(8)) +#define MGBE_MTL_TX_OP_MODE_Q2TCMAP_SHIFT 8U +#define MGBE_MTL_TX_OP_MODE_TXQEN (OSI_BIT(3) | OSI_BIT(2)) +#define MGBE_MTL_TX_OP_MODE_TXQEN_SHIFT 2U +#define MGBE_MTL_TCQ_ETS_CR_CC OSI_BIT(3) +#define MGBE_MTL_TCQ_ETS_CR_CC_SHIFT 3U +#define MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK 0x001FFFFFU +#define MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK 0x0000FFFFU +#define MGBE_MTL_TCQ_ETS_HCR_HC_MASK 0x1FFFFFFFU +#define MGBE_MTL_TCQ_ETS_LCR_LC_MASK 0x1FFFFFFFU + +#define MGBE_8PTP_CYCLE 26U +#define MGBE_PTP_CLK_SPEED 312500000U +#define MGBE_DMA_ISR_MTLIS OSI_BIT(16) +#define MGBE_IMR_TXESIE OSI_BIT(13) +#define MGBE_IMR_FPEIE OSI_BIT(15) +#ifndef OSI_STRIPPED_LIB +#define MGBE_MAC_EXT_CNF_EIPG 0x1U +#define MGBE_MAC_EXT_CNF_EIPG_MASK 0x7FU +#endif /* !OSI_STRIPPED_LIB */ +#define MGBE_MAC_RQC4R_PMCBCQ (OSI_BIT(27) | OSI_BIT(26) | \ + OSI_BIT(25) | OSI_BIT(24)) +#define MGBE_MAC_RQC4R_PMCBCQ_SHIFT 24U +#define MGBE_MAC_RQC1R_RQ_SHIFT 4U +#define MGBE_MTL_EST_EEST OSI_BIT(0) +/* EST GCL controlOSI_BITmap */ +#define MGBE_MTL_EST_ADDR_SHIFT 8 +/*EST MTL interrupt STATUS and ERR*/ +#define MGBE_MTL_IS_ESTIS OSI_BIT(18) +/* MTL_EST_STATUS*/ +#define MGBE_MTL_EST_STATUS_CGCE OSI_BIT(4) +#define MGBE_MTL_EST_STATUS_HLBS OSI_BIT(3) +#define MGBE_MTL_EST_STATUS_HLBF OSI_BIT(2) +#define MGBE_MTL_EST_STATUS_BTRE OSI_BIT(1) +#define MGBE_MTL_EST_STATUS_SWLC OSI_BIT(0) +/* MAC FPE control/statusOSI_BITmap */ +#define MGBE_MAC_FPE_CTS_EFPE OSI_BIT(0) +#define 
MGBE_MAC_FPE_CTS_TRSP OSI_BIT(19) +#define MGBE_MAC_FPE_CTS_TVER OSI_BIT(18) +#define MGBE_MAC_FPE_CTS_RVER OSI_BIT(16) +#define MGBE_MAC_FPE_CTS_SRSP OSI_BIT(2) +/* MTL FPE adv registers */ +#define MGBE_MAC_IMR_FPEIS OSI_BIT(16) +#define MGBE_MAC_FPE_CTS_RRSP OSI_BIT(17) +/* MTL_EST_CONTROL */ +#define MGBE_MTL_EST_CONTROL_PTOV (OSI_BIT(23) | OSI_BIT(24) | \ + OSI_BIT(25) | OSI_BIT(26) | \ + OSI_BIT(27) | OSI_BIT(28) | \ + OSI_BIT(29) | OSI_BIT(30) | \ + OSI_BIT(31)) +#define MGBE_MTL_EST_CONTROL_PTOV_SHIFT 23U +#define MGBE_MTL_EST_PTOV_RECOMMEND 32U +#define MGBE_MTL_EST_CONTROL_CTOV (OSI_BIT(11) | OSI_BIT(12) | \ + OSI_BIT(13) | OSI_BIT(14) | \ + OSI_BIT(15) | OSI_BIT(16) | \ + OSI_BIT(17) | OSI_BIT(18) | \ + OSI_BIT(19) | OSI_BIT(20) | \ + OSI_BIT(21) | OSI_BIT(22)) +#define MGBE_MTL_EST_CONTROL_CTOV_SHIFT 11U +#define MGBE_MTL_EST_CTOV_RECOMMEND 42U +#define MGBE_MAC_RQC1R_RQ (OSI_BIT(7) | OSI_BIT(6) | \ + OSI_BIT(5) | OSI_BIT(4)) + +/** + * @addtogroup MGBE-MTL-FRP FRP Indirect Access register defines + * + * @brief MGBE MTL FRP register defines + * @{ + */ +#define MGBE_MTL_FRP_READ_UDELAY 1U +#define MGBE_MTL_FRP_READ_RETRY 1000U + +#define MGBE_MTL_OP_MODE_FRPE OSI_BIT(15) +/* FRP Control and Status register defines */ +#define MGBE_MTL_RXP_CS_RXPI OSI_BIT(31) +#define MGBE_MTL_RXP_CS_NPE (OSI_BIT(23) | OSI_BIT(22) | \ + OSI_BIT(21) | OSI_BIT(20) | \ + OSI_BIT(19) | OSI_BIT(18) | \ + OSI_BIT(17) | OSI_BIT(16)) +#define MGBE_MTL_RXP_CS_NPE_SHIFT 16U +#define MGBE_MTL_RXP_CS_NVE (OSI_BIT(7) | OSI_BIT(6) | \ + OSI_BIT(5) | OSI_BIT(4) | \ + OSI_BIT(3) | OSI_BIT(2) | \ + OSI_BIT(1) | OSI_BIT(0)) +/* FRP Interrupt Control and Status register */ +#define MGBE_MTL_RXP_INTR_CS_PDRFIE OSI_BIT(19) +#define MGBE_MTL_RXP_INTR_CS_FOOVIE OSI_BIT(18) +#define MGBE_MTL_RXP_INTR_CS_NPEOVIE OSI_BIT(17) +#define MGBE_MTL_RXP_INTR_CS_NVEOVIE OSI_BIT(16) +#define MGBE_MTL_RXP_INTR_CS_PDRFIS OSI_BIT(3) +#define MGBE_MTL_RXP_INTR_CS_FOOVIS OSI_BIT(2) +#define 
MGBE_MTL_RXP_INTR_CS_NPEOVIS OSI_BIT(1) +#define MGBE_MTL_RXP_INTR_CS_NVEOVIS OSI_BIT(0) +/* Indirect Instruction Table defines */ +#define MGBE_MTL_FRP_IE0(x) (((x) * 0x4U) + 0x0U) +#define MGBE_MTL_FRP_IE1(x) (((x) * 0x4U) + 0x1U) +#define MGBE_MTL_FRP_IE2(x) (((x) * 0x4U) + 0x2U) +#define MGBE_MTL_FRP_IE3(x) (((x) * 0x4U) + 0x3U) +#define MGBE_MTL_FRP_IE2_DCH (OSI_BIT(31) | OSI_BIT(30) | \ + OSI_BIT(29) | OSI_BIT(28) | \ + OSI_BIT(27) | OSI_BIT(26) | \ + OSI_BIT(25) | OSI_BIT(24)) +#define MGBE_MTL_FRP_IE2_DCH_SHIFT 24U +#define MGBE_MTL_FRP_IE2_OKI (OSI_BIT(23) | OSI_BIT(22) | \ + OSI_BIT(21) | OSI_BIT(20) | \ + OSI_BIT(19) | OSI_BIT(18) | \ + OSI_BIT(17) | OSI_BIT(16)) +#define MGBE_MTL_FRP_IE2_OKI_SHIFT 16U +#define MGBE_MTL_FRP_IE2_FO (OSI_BIT(13) | OSI_BIT(12) | \ + OSI_BIT(11) | OSI_BIT(10) | \ + OSI_BIT(9) | OSI_BIT(8)) +#define MGBE_MTL_FRP_IE2_FO_SHIFT 8U +#define MGBE_MTL_FRP_IE2_NC OSI_BIT(3) +#define MGBE_MTL_FRP_IE2_IM OSI_BIT(2) +#define MGBE_MTL_FRP_IE2_RF OSI_BIT(1) +#define MGBE_MTL_FRP_IE2_AF OSI_BIT(0) +#define MGBE_MTL_FRP_IE3_DCH_MASK 0xFFFFU +/* Indirect register defines */ +#define MGBE_MTL_RXP_IND_CS_BUSY OSI_BIT(31) +#define MGBE_MTL_RXP_IND_CS_ACCSEL OSI_BIT(24) +#define MGBE_MTL_RXP_IND_CS_WRRDN OSI_BIT(16) +#define MGBE_MTL_RXP_IND_CS_ADDR (OSI_BIT(9) | OSI_BIT(8) | \ + OSI_BIT(7) | OSI_BIT(6) | \ + OSI_BIT(5) | OSI_BIT(4) | \ + OSI_BIT(3) | OSI_BIT(2) | \ + OSI_BIT(1) | OSI_BIT(0)) +/** @} */ + +/** + * @addtogroup MGBE MTL queue ETS algorithm mode + * + * @brief MTL queue algorithm type + * @{ + */ +#define OSI_MGBE_TXQ_AVALG_ETS 2U +#define MGBE_MTL_TCQ_ETS_CR_AVALG (OSI_BIT(1) | OSI_BIT(0)) /** @} */ /** @@ -63,32 +331,16 @@ */ #define MGBE_MAC_TMCR 0x0000 #define MGBE_MAC_RMCR 0x0004 -#define MGBE_MAC_PFR 0x0008 -#define MGBE_MAC_HTR_REG(x) ((0x0004U * (x)) + 0x0010U) #define MGBE_MAC_VLAN_TR 0x0050 #define MGBE_MAC_VLANTIR 0x0060 -#define MGBE_MAC_QX_TX_FLW_CTRL(x) ((0x0004U * (x)) + 0x0070U) -#define MGBE_MAC_RX_FLW_CTRL 0x0090 
-#define MGBE_MAC_RQC4R 0x0094 #define MGBE_MAC_RQC0R 0x00A0 #define MGBE_MAC_RQC1R 0x00A4 -#define MGBE_MAC_RQC2R 0x00A8 #define MGBE_MAC_ISR 0x00B0 #define MGBE_MAC_IER 0x00B4 -#define MGBE_MAC_RX_TX_STS 0x00B8 -#define MGBE_MAC_PMTCSR 0x00C0 -#define MGBE_MAC_LPI_CSR 0x00D0 -#define MGBE_MAC_LPI_TIMER_CTRL 0x00D4 -#define MGBE_MAC_LPI_EN_TIMER 0x00D8 -#define MGBE_MAC_1US_TIC_COUNT 0x00DC #define MGBE_MAC_EXT_CNF 0x0140 #define MGBE_MDIO_SCCD 0x0204 #define MGBE_MDIO_SCCA 0x0200 -#define MGBE_MAC_FPE_CTS 0x0280 -#define MGBE_MAC_CSR_SW_CTL 0x0290 -#define MGBE_MAC_MA0HR 0x0300 #define MGBE_MAC_ADDRH(x) ((0x0008U * (x)) + 0x0300U) -#define MGBE_MAC_MA0LR 0x0304 #define MGBE_MAC_ADDRL(x) ((0x0008U * (x)) + 0x0304U) #define MGBE_MAC_INDIR_AC 0x0700 #define MGBE_MAC_INDIR_DATA 0x0704 @@ -97,14 +349,8 @@ #define MGBE_MMC_CNTRL 0x0800 #define MGBE_MAC_L3L4_ADDR_CTR 0x0C00 #define MGBE_MAC_L3L4_DATA 0x0C04 -#define MGBE_MAC_ARPPA 0x0C10 -#define MGBE_MAC_RSS_CTRL 0x0C80 -#define MGBE_MAC_RSS_ADDR 0x0C88 -#define MGBE_MAC_RSS_DATA 0x0C8C #define MGBE_MAC_TCR 0x0D00 #define MGBE_MAC_SSIR 0x0D04 -#define MGBE_MAC_STSR 0x0D08 -#define MGBE_MAC_STNSR 0x0D0C #define MGBE_MAC_STSUR 0x0D10 #define MGBE_MAC_STNSUR 0x0D14 #define MGBE_MAC_TAR 0x0D18 @@ -113,10 +359,6 @@ #define MGBE_MAC_TSSEC 0x0D34 #define MGBE_MAC_TSPKID 0x0D38 #define MGBE_MAC_PPS_CTL 0x0D70 -#define MGBE_MAC_PTO_CR 0x0DC0 -#define MGBE_MAC_PIDR0 0x0DC4 -#define MGBE_MAC_PIDR1 0x0DC8 -#define MGBE_MAC_PIDR2 0x0DCC /** @} */ /** @@ -125,36 +367,30 @@ * @brief MGBE Wrapper register offsets * @{ */ -#define MGBE_WRAP_AXI_ASID0_CTRL 0x8400 -#define MGBE_WRAP_AXI_ASID1_CTRL 0x8404 -#define MGBE_WRAP_AXI_ASID2_CTRL 0x8408 #define MGBE_WRAP_COMMON_INTR_ENABLE 0x8704 + +#ifdef HSI_SUPPORT #define MGBE_REGISTER_PARITY_ERR OSI_BIT(5) #define MGBE_CORE_CORRECTABLE_ERR OSI_BIT(4) #define MGBE_CORE_UNCORRECTABLE_ERR OSI_BIT(3) + +#define MGBE_MTL_DEBUG_CONTROL 0x1008U +#define MGBE_MTL_DEBUG_CONTROL_FDBGEN OSI_BIT(0) 
+#define MGBE_MTL_DEBUG_CONTROL_DBGMOD OSI_BIT(1) +#define MGBE_MTL_DEBUG_CONTROL_FIFORDEN OSI_BIT(10) +#define MGBE_MTL_DEBUG_CONTROL_EIEE OSI_BIT(16) +#define MGBE_MTL_DEBUG_CONTROL_EIEC OSI_BIT(18) + +#endif #define MGBE_MAC_SBD_INTR OSI_BIT(2) -#define MGBE_WRAP_COMMON_INTR_STATUS 0x8708 +#define MGBE_WRAP_COMMON_INTR_STATUS 0x8708 #define MGBE_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U)) #define MGBE_VIRTUAL_APB_ERR_CTRL 0x8300 -#define MGBE_WRAP_SYNC_TSC_PTP_CAPTURE 0x800CU -#define MGBE_WRAP_TSC_CAPTURE_LOW 0x8010U -#define MGBE_WRAP_TSC_CAPTURE_HIGH 0x8014U -#define MGBE_WRAP_PTP_CAPTURE_LOW 0x8018U -#define MGBE_WRAP_PTP_CAPTURE_HIGH 0x801CU /** @} */ -/** - * @addtogroup MGBE MAC hash table defines - * - * @brief MGBE MAC hash table Control register - * filed type defines. - * @{ - */ -#define MGBE_MAX_HTR_REGS 4U -/** @} */ /** - * @addtogroup MGBE MAC Mode Select Group + * @addtogroup MGBE-MAC-MODE MAC Mode Select Group * * @brief MGBE MAC Indirect Access control and status for * Mode Select type defines. 
@@ -165,13 +401,6 @@ #define MGBE_MAC_INDIR_AC_OB_RETRY 10U #define MGBE_MAC_DCHSEL 0U -#define MGBE_MAC_PCCTRL 1U -#define MGBE_MAC_PCNTRL 2U -#define MGBE_MAC_DPCSEL 3U -#define MGBE_MAC_VPCSEL 4U -#define MGBE_MAC_LPCSEL 5U -#define MGBE_MAC_APCSEL 6U -#define MGBE_MAC_PC_STATUS 7U /* MGBE_MAC_INDIR_AC register defines */ #define MGBE_MAC_INDIR_AC_MSEL (OSI_BIT(19) | OSI_BIT(18) | \ @@ -182,84 +411,29 @@ OSI_BIT(11) | OSI_BIT(10) | \ OSI_BIT(9) | OSI_BIT(8)) #define MGBE_MAC_INDIR_AC_AOFF_SHIFT 8U -#define MGBE_MAC_INDIR_AC_AUTO OSI_BIT(5) #define MGBE_MAC_INDIR_AC_CMD OSI_BIT(1) #define MGBE_MAC_INDIR_AC_OB OSI_BIT(0) /** @} */ /** - * @addtogroup MGBE MAC L3L4 defines + * @addtogroup MGBE-L3L4 MAC L3L4 defines * * @brief MGBE L3L4 Address Control register * IDDR filter filed type defines * @{ */ -#define MGBE_MAX_VLAN_FILTER 32U #define MGBE_MAC_XB_WAIT 10U #define MGBE_MAC_L3L4_CTR 0x0 -#define MGBE_MAC_L4_ADDR 0x1 -#define MGBE_MAC_L3_AD0R 0x4 #define MGBE_MAC_L3_AD1R 0x5 +#ifndef OSI_STRIPPED_LIB +#define MGBE_MAC_L3_AD0R 0x4 #define MGBE_MAC_L3_AD2R 0x6 #define MGBE_MAC_L3_AD3R 0x7 - -#define MGBE_MAC_L3L4_CTR_DMCHEN0 OSI_BIT(31) -#define MGBE_MAC_L3L4_CTR_DMCHEN0_SHIFT 31 -#define MGBE_MAC_L3L4_CTR_DMCHN0 (OSI_BIT(24) | OSI_BIT(25) | \ - OSI_BIT(26) | OSI_BIT(27)) -#define MGBE_MAC_L3L4_CTR_DMCHN0_SHIFT 24 -#define MGBE_MAC_L3L4_CTR_L4DPIM0 OSI_BIT(21) -#define MGBE_MAC_L3L4_CTR_L4DPIM0_SHIFT 21 -#define MGBE_MAC_L3L4_CTR_L4DPM0 OSI_BIT(20) -#define MGBE_MAC_L3L4_CTR_L4SPIM0 OSI_BIT(19) -#define MGBE_MAC_L3L4_CTR_L4SPIM0_SHIFT 19 -#define MGBE_MAC_L3L4_CTR_L4SPM0 OSI_BIT(18) -#define MGBE_MAC_L3L4_CTR_L4PEN0 OSI_BIT(16) -#define MGBE_MAC_L3L4_CTR_L3HDBM0 (OSI_BIT(11) | OSI_BIT(12) | \ - OSI_BIT(13) | OSI_BIT(14) | \ - OSI_BIT(15)) -#define MGBE_MAC_L3L4_CTR_L3HSBM0 (OSI_BIT(6) | OSI_BIT(7) | \ - OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10)) -#define MGBE_MAC_L3L4_CTR_L3DAIM0 OSI_BIT(5) -#define MGBE_MAC_L3L4_CTR_L3DAIM0_SHIFT 5 -#define 
MGBE_MAC_L3L4_CTR_L3DAM0 OSI_BIT(4) -#define MGBE_MAC_L3L4_CTR_L3SAIM0 OSI_BIT(3) -#define MGBE_MAC_L3L4_CTR_L3SAIM0_SHIFT 3 -#define MGBE_MAC_L3L4_CTR_L3SAM0 OSI_BIT(2) -#define MGBE_MAC_L3L4_CTR_L3PEN0 OSI_BIT(0) -#define MGBE_MAC_L3_IP6_CTRL_CLEAR (MGBE_MAC_L3L4_CTR_L3SAM0 | \ - MGBE_MAC_L3L4_CTR_L3SAIM0 | \ - MGBE_MAC_L3L4_CTR_L3DAM0 | \ - MGBE_MAC_L3L4_CTR_L3DAIM0 | \ - MGBE_MAC_L3L4_CTR_DMCHEN0 | \ - MGBE_MAC_L3L4_CTR_DMCHN0) -#define MGBE_MAC_L3_IP4_SA_CTRL_CLEAR (MGBE_MAC_L3L4_CTR_L3SAM0 | \ - MGBE_MAC_L3L4_CTR_L3SAIM0 | \ - MGBE_MAC_L3L4_CTR_DMCHEN0 | \ - MGBE_MAC_L3L4_CTR_DMCHN0) -#define MGBE_MAC_L3_IP4_DA_CTRL_CLEAR (MGBE_MAC_L3L4_CTR_L3DAM0 | \ - MGBE_MAC_L3L4_CTR_L3DAIM0 | \ - MGBE_MAC_L3L4_CTR_DMCHEN0 | \ - MGBE_MAC_L3L4_CTR_DMCHN0) -#define MGBE_MAC_L4_SP_CTRL_CLEAR (MGBE_MAC_L3L4_CTR_L4SPM0 | \ - MGBE_MAC_L3L4_CTR_L4SPIM0 | \ - MGBE_MAC_L3L4_CTR_DMCHEN0 | \ - MGBE_MAC_L3L4_CTR_DMCHN0) -#define MGBE_MAC_L4_DP_CTRL_CLEAR (MGBE_MAC_L3L4_CTR_L4DPM0 | \ - MGBE_MAC_L3L4_CTR_L4DPIM0 | \ - MGBE_MAC_L3L4_CTR_DMCHEN0 | \ - MGBE_MAC_L3L4_CTR_DMCHN0) -#define MGBE_MAC_L3L4_CTRL_ALL (MGBE_MAC_L3_IP6_CTRL_CLEAR | \ - MGBE_MAC_L3_IP4_SA_CTRL_CLEAR | \ - MGBE_MAC_L3_IP4_DA_CTRL_CLEAR | \ - MGBE_MAC_L4_SP_CTRL_CLEAR | \ - MGBE_MAC_L4_DP_CTRL_CLEAR) +#define MGBE_MAC_L4_ADDR 0x1 #define MGBE_MAC_L4_ADDR_SP_MASK 0x0000FFFFU #define MGBE_MAC_L4_ADDR_DP_MASK 0xFFFF0000U #define MGBE_MAC_L4_ADDR_DP_SHIFT 16 -#define MGBE_MAC_PPS_CTL_PPSCTRL0 (OSI_BIT(3) | OSI_BIT(2) |\ - OSI_BIT(1) | OSI_BIT(0)) +#endif /* !OSI_STRIPPED_LIB */ /** @} */ /** @@ -283,118 +457,16 @@ * @brief MGBE MTL register offsets * @{ */ -#define MGBE_MTL_OP_MODE 0x1000 -#define MGBE_MTL_INTR_STATUS 0x1020 #define MGBE_MTL_RXQ_DMA_MAP0 0x1030 #define MGBE_MTL_RXQ_DMA_MAP1 0x1034 #define MGBE_MTL_RXQ_DMA_MAP2 0x1038 -#define MGBE_MTL_RXQ_DMA_MAP3 0x103b -#define MGBE_MTL_EST_CONTROL 0x1050 -#define MGBE_MTL_EST_OVERHEAD 0x1054 -#define MGBE_MTL_EST_STATUS 0x1058 -#define MGBE_MTL_EST_SCH_ERR 0x1060 
-#define MGBE_MTL_EST_FRMS_ERR 0x1064 -#define MGBE_MTL_EST_FRMC_ERR 0x1068 -#define MGBE_MTL_EST_ITRE 0x1070 -#define MGBE_MTL_EST_GCL_CONTROL 0x1080 -#define MGBE_MTL_EST_DATA 0x1084 -#define MGBE_MTL_FPE_CTS 0x1090 -#define MGBE_MTL_FPE_ADV 0x1094 #define MGBE_MTL_CHX_TX_OP_MODE(x) ((0x0080U * (x)) + 0x1100U) #define MGBE_MTL_TCQ_ETS_CR(x) ((0x0080U * (x)) + 0x1110U) #define MGBE_MTL_TCQ_QW(x) ((0x0080U * (x)) + 0x1118U) -#define MGBE_MTL_TCQ_ETS_SSCR(x) ((0x0080U * (x)) + 0x111CU) -#define MGBE_MTL_TCQ_ETS_HCR(x) ((0x0080U * (x)) + 0x1120U) -#define MGBE_MTL_TCQ_ETS_LCR(x) ((0x0080U * (x)) + 0x1124U) #define MGBE_MTL_CHX_RX_OP_MODE(x) ((0x0080U * (x)) + 0x1140U) #define MGBE_MTL_RXQ_FLOW_CTRL(x) ((0x0080U * (x)) + 0x1150U) -#define MGBE_MTL_QINT_ENABLE(x) ((0x0080U * (x)) + 0x1170U) -#define MGBE_MTL_QINT_STATUS(x) ((0x0080U * (x)) + 0x1174U) -#define MGBE_MTL_TC_PRTY_MAP0 0x1040 -#define MGBE_MTL_TC_PRTY_MAP1 0x1044 -#define MGBE_MTL_RXP_CS 0x10A0 -#define MGBE_MTL_RXP_INTR_CS 0x10A4 -#define MGBE_MTL_RXP_IND_CS 0x10B0 -#define MGBE_MTL_RXP_IND_DATA 0x10B4 /** @} */ -/** - * @addtogroup MGBE-MTL FRP Indirect Access register defines - * - * @brief MGBE MTL register offsets - * @{ - */ -#define MGBE_MTL_FRP_READ_UDELAY 1U -#define MGBE_MTL_FRP_READ_RETRY 1000U - -#define MGBE_MTL_OP_MODE_FRPE OSI_BIT(15) -/* FRP Control and Status register defines */ -#define MGBE_MTL_RXP_CS_RXPI OSI_BIT(31) -#define MGBE_MTL_RXP_CS_PIPE (OSI_BIT(30) | OSI_BIT(29) | \ - OSI_BIT(28)) -#define MGBE_MTL_RXP_CS_NPE (OSI_BIT(23) | OSI_BIT(22) | \ - OSI_BIT(21) | OSI_BIT(20) | \ - OSI_BIT(19) | OSI_BIT(18) | \ - OSI_BIT(17) | OSI_BIT(16)) -#define MGBE_MTL_RXP_CS_NPE_SHIFT 16U -#define MGBE_MTL_RXP_CS_FPE_RCH (OSI_BIT(15) | OSI_BIT(14) | \ - OSI_BIT(13) | OSI_BIT(12)) -#define MGBE_MTL_RXP_CS_NVE (OSI_BIT(7) | OSI_BIT(6) | \ - OSI_BIT(5) | OSI_BIT(4) | \ - OSI_BIT(3) | OSI_BIT(2) | \ - OSI_BIT(1) | OSI_BIT(0)) -/* FRP Interrupt Control and Status register */ -#define 
MGBE_MTL_RXP_INTR_CS_PDRFIE OSI_BIT(19) -#define MGBE_MTL_RXP_INTR_CS_FOOVIE OSI_BIT(18) -#define MGBE_MTL_RXP_INTR_CS_NPEOVIE OSI_BIT(17) -#define MGBE_MTL_RXP_INTR_CS_NVEOVIE OSI_BIT(16) -#define MGBE_MTL_RXP_INTR_CS_PDRFIS OSI_BIT(3) -#define MGBE_MTL_RXP_INTR_CS_FOOVIS OSI_BIT(2) -#define MGBE_MTL_RXP_INTR_CS_NPEOVIS OSI_BIT(1) -#define MGBE_MTL_RXP_INTR_CS_NVEOVIS OSI_BIT(0) -/* Indirect Instruction Table defines */ -#define MGBE_MTL_FRP_IE0(x) ((x) * 0x4U + 0x0U) -#define MGBE_MTL_FRP_IE1(x) ((x) * 0x4U + 0x1U) -#define MGBE_MTL_FRP_IE2(x) ((x) * 0x4U + 0x2U) -#define MGBE_MTL_FRP_IE3(x) ((x) * 0x4U + 0x3U) -#define MGBE_MTL_FRP_IE2_DCH (OSI_BIT(31) | OSI_BIT(30) | \ - OSI_BIT(29) | OSI_BIT(28) | \ - OSI_BIT(27) | OSI_BIT(26) | \ - OSI_BIT(25) | OSI_BIT(24)) -#define MGBE_MTL_FRP_IE2_DCH_SHIFT 24U -#define MGBE_MTL_FRP_IE2_DCH_MASK 0xFFU -#define MGBE_MTL_FRP_IE2_OKI (OSI_BIT(23) | OSI_BIT(22) | \ - OSI_BIT(21) | OSI_BIT(20) | \ - OSI_BIT(19) | OSI_BIT(18) | \ - OSI_BIT(17) | OSI_BIT(16)) -#define MGBE_MTL_FRP_IE2_OKI_SHIFT 16U -#define MGBE_MTL_FRP_IE2_FO (OSI_BIT(13) | OSI_BIT(12) | \ - OSI_BIT(11) | OSI_BIT(10) | \ - OSI_BIT(9) | OSI_BIT(8)) -#define MGBE_MTL_FRP_IE2_FO_SHIFT 8U -#define MGBE_MTL_FRP_IE2_NC OSI_BIT(3) -#define MGBE_MTL_FRP_IE2_IM OSI_BIT(2) -#define MGBE_MTL_FRP_IE2_RF OSI_BIT(1) -#define MGBE_MTL_FRP_IE2_AF OSI_BIT(0) -#define MGBE_MTL_FRP_IE3_DCH_MASK 0xFFFFU -/* Indirect register defines */ -#define MGBE_MTL_RXP_DROP_CNT 0U -#define MGBE_MTL_RXP_ERROR_CNT 1U -#define MGBE_MTL_RXP_BYPASS_CNT 2U -#define MGBE_MTL_RXP_ACCEPT_CNT(x) ((0x10 * (x)) + 0x40) -#define MGBE_MTL_RXP_IND_CS_BUSY OSI_BIT(31) -#define MGBE_MTL_RXP_IND_CS_ACCSEL OSI_BIT(24) -#define MGBE_MTL_RXP_IND_CS_RXPEIEC (OSI_BIT(22) | OSI_BIT(21)) -#define MGBE_MTL_RXP_IND_CS_RXPEIEE OSI_BIT(20) -#define MGBE_MTL_RXP_IND_CS_CRWEN OSI_BIT(18) -#define MGBE_MTL_RXP_IND_CS_CRWSEL OSI_BIT(17) -#define MGBE_MTL_RXP_IND_CS_WRRDN OSI_BIT(16) -#define MGBE_MTL_RXP_IND_CS_ADDR 
(OSI_BIT(9) | OSI_BIT(8) | \ - OSI_BIT(7) | OSI_BIT(6) | \ - OSI_BIT(5) | OSI_BIT(4) | \ - OSI_BIT(3) | OSI_BIT(2) | \ - OSI_BIT(1) | OSI_BIT(0)) -/** @} */ /** * @addtogroup HW Register BIT values @@ -402,38 +474,16 @@ * @brief consists of corresponding MGBE MAC, MTL register bit values * @{ */ -#define MGBE_DMA_MODE_SWR OSI_BIT(0) -#define MGBE_MTL_TCQ_ETS_CR_SLC_MASK (OSI_BIT(6) | OSI_BIT(5) | \ - OSI_BIT(4)) -#define MGBE_MTL_TCQ_ETS_CR_CC OSI_BIT(3) -#define MGBE_MTL_TCQ_ETS_CR_CC_SHIFT 3U -#define MGBE_MTL_TCQ_ETS_CR_AVALG (OSI_BIT(1) | OSI_BIT(0)) -#define MGBE_MTL_TCQ_ETS_CR_AVALG_SHIFT 0U -#define MGBE_MTL_TCQ_ETS_QW_ISCQW_MASK 0x001FFFFFU -#define MGBE_MTL_TCQ_ETS_SSCR_SSC_MASK 0x0000FFFFU -#define MGBE_MTL_TCQ_ETS_HCR_HC_MASK 0x1FFFFFFFU -#define MGBE_MTL_TCQ_ETS_LCR_LC_MASK 0x1FFFFFFFU -#define MGBE_MTL_TX_OP_MODE_Q2TCMAP (OSI_BIT(10) | OSI_BIT(9) |\ - OSI_BIT(8)) -#define MGBE_MTL_TX_OP_MODE_Q2TCMAP_SHIFT 8U -#define MGBE_MTL_TX_OP_MODE_TXQEN (OSI_BIT(3) | OSI_BIT(2)) -#define MGBE_MTL_TX_OP_MODE_TXQEN_SHIFT 2U #define MGBE_MTL_CHX_TX_OP_MODE_Q2TC_SH 8U -#define MGBE_MTL_QTOMR_FTQ OSI_BIT(0) -#define MGBE_MTL_QTOMR_FTQ_LPOS OSI_BIT(0) #define MGBE_MTL_TSF OSI_BIT(1) #define MGBE_MTL_TXQEN OSI_BIT(3) #define MGBE_MTL_RSF OSI_BIT(5) #define MGBE_MTL_TCQ_QW_ISCQW OSI_BIT(4) -#define MGBE_MTL_QINT_TXUNIFS OSI_BIT(0) -#define MGBE_MTL_QINT_TXUIE OSI_BIT(0) #define MGBE_MAC_RMCR_ACS OSI_BIT(1) #define MGBE_MAC_RMCR_CST OSI_BIT(2) #define MGBE_MAC_RMCR_IPC OSI_BIT(9) #define MGBE_MAC_RXQC0_RXQEN_MASK 0x3U #define MGBE_MAC_RXQC0_RXQEN_SHIFT(x) ((x) * 2U) -#define MGBE_MAC_RMCR_LM OSI_BIT(10) -#define MGBE_MAC_RMCR_ARPEN OSI_BIT(31) #define MGBE_MDIO_SCCD_SBUSY OSI_BIT(22) #define MGBE_MDIO_SCCA_DA_SHIFT 21U #define MGBE_MDIO_SCCA_DA_MASK 0x1FU @@ -450,65 +500,24 @@ #define MGBE_MAC_RMCR_GPSLCE OSI_BIT(6) #define MGBE_MAC_RMCR_WD OSI_BIT(7) #define MGBE_MAC_RMCR_JE OSI_BIT(8) -#define MGBE_MAC_TMCR_IFP OSI_BIT(11) #define MGBE_MAC_TMCR_DDIC OSI_BIT(1) -#define 
MGBE_MAC_TMCR_IPG_MASK 0x700U #define MGBE_MAC_TMCR_JD OSI_BIT(16) #define MGBE_MMC_CNTRL_CNTRST OSI_BIT(0) #define MGBE_MMC_CNTRL_RSTONRD OSI_BIT(2) #define MGBE_MMC_CNTRL_CNTMCT (OSI_BIT(4) | OSI_BIT(5)) #define MGBE_MMC_CNTRL_CNTPRST OSI_BIT(7) -#define MGBE_MAC_RQC1R_PTPQ (OSI_BIT(27) | OSI_BIT(26) | \ - OSI_BIT(25) | OSI_BIT(24)) -#define MGBE_MAC_RQC1R_PTPQ_SHIFT 24U -#define MGBE_MAC_RQC1R_TPQC1 OSI_BIT(22) -#define MGBE_MAC_RQC1R_TPQC0 OSI_BIT(21) -#define MGBE_MAC_RQC1R_OMCBCQ OSI_BIT(20) #define MGBE_MAC_RQC1R_MCBCQEN OSI_BIT(15) #define MGBE_MAC_RQC1R_MCBCQ (OSI_BIT(11) | OSI_BIT(10) | \ OSI_BIT(9) | OSI_BIT(8)) #define MGBE_MAC_RQC1R_MCBCQ_SHIFT 8U -#define MGBE_MAC_RQC1R_MCBCQ_DEFAULT 9U -#define MGBE_MAC_RQC1R_RQ (OSI_BIT(7) | OSI_BIT(6) | \ - OSI_BIT(5) | OSI_BIT(4)) -#define MGBE_MAC_RQC1R_RQ_SHIFT 4U -#define MGBE_MAC_RQC4R_PMCBCQ (OSI_BIT(27) | OSI_BIT(26) | \ - OSI_BIT(25) | OSI_BIT(24)) -#define MGBE_MAC_RQC4R_PMCBCQ_SHIFT 24U #define MGBE_IMR_RGSMIIIE OSI_BIT(0) #define MGBE_IMR_TSIE OSI_BIT(12) -#define MGBE_IMR_TXESIE OSI_BIT(13) -#define MGBE_IMR_FPEIE OSI_BIT(15) -#define MGBE_MAC_IMR_FPEIS OSI_BIT(16) #define MGBE_ISR_TSIS OSI_BIT(12) -#define MGBE_DMA_ISR_MTLIS OSI_BIT(16) #define MGBE_DMA_ISR_MACIS OSI_BIT(17) #define MGBE_DMA_ISR_DCH0_DCH15_MASK 0x3FFU -#define MGBE_DMA_CHX_STATUS_TPS OSI_BIT(1) -#define MGBE_DMA_CHX_STATUS_TBU OSI_BIT(2) -#define MGBE_DMA_CHX_STATUS_RBU OSI_BIT(7) -#define MGBE_DMA_CHX_STATUS_RPS OSI_BIT(8) -#define MGBE_DMA_CHX_STATUS_FBE OSI_BIT(12) #define MGBE_DMA_CHX_STATUS_TI OSI_BIT(0) #define MGBE_DMA_CHX_STATUS_RI OSI_BIT(6) -#define MGBE_MAC_PFR_PR OSI_BIT(0) -#define MGBE_MAC_PFR_HUC OSI_BIT(1) -#define MGBE_MAC_PFR_DAIF OSI_BIT(3) -#define MGBE_MAC_PFR_PM OSI_BIT(4) -#define MGBE_MAC_PFR_DBF OSI_BIT(5) -#define MGBE_MAC_PFR_PCF (OSI_BIT(6) | OSI_BIT(7)) -#define MGBE_MAC_PFR_SAIF OSI_BIT(8) -#define MGBE_MAC_PFR_SAF OSI_BIT(9) -#define MGBE_MAC_PFR_HPF OSI_BIT(10) -#define MGBE_MAC_PFR_VTFE OSI_BIT(16) 
-#define MGBE_MAC_PFR_VTFE_SHIFT 16 -#define MGBE_MAC_PFR_IPFE OSI_BIT(20) -#define MGBE_MAC_PFR_IPFE_SHIFT 20 -#define MGBE_MAC_PFR_DNTU OSI_BIT(21) -#define MGBE_MAC_PFR_VUCC OSI_BIT(22) -#define MGBE_MAC_PFR_RA OSI_BIT(31) #define MGBE_MAC_ADDRH_AE OSI_BIT(31) -#define MGBE_MAC_ADDRH_AE_SHIFT 31 #define MGBE_MAC_ADDRH_SA OSI_BIT(30) #define MGBE_MAC_ADDRH_SA_SHIFT 30 #define MGBE_MAB_ADDRH_MBC_MAX_MASK 0x3FU @@ -529,30 +538,15 @@ #define MGBE_MAC_L3L4_ADDR_CTR_IDDR_FTYPE_SHIFT 8 #define MGBE_MAC_L3L4_ADDR_CTR_TT OSI_BIT(1) #define MGBE_MAC_L3L4_ADDR_CTR_XB OSI_BIT(0) -#define MGBE_MAC_VLAN_TR_ETV OSI_BIT(16) -#define MGBE_MAC_VLAN_TR_VTIM OSI_BIT(17) -#define MGBE_MAC_VLAN_TR_VTIM_SHIFT 17 -#define MGBE_MAC_VLAN_TR_VTHM OSI_BIT(25) -#define MGBE_MAC_VLANTR_EVLS_ALWAYS_STRIP ((unsigned int)0x3 << 21U) +#define MGBE_MAC_VLANTR_EVLS_ALWAYS_STRIP ((nveu32_t)0x3 << 21U) #define MGBE_MAC_VLANTR_EVLRXS OSI_BIT(24) #define MGBE_MAC_VLANTR_DOVLTC OSI_BIT(20) #define MGBE_MAC_VLANTIR_VLTI OSI_BIT(20) #define MGBE_MAC_VLANTIRR_CSVL OSI_BIT(19) -#define MGBE_MAC_LPI_CSR_LPITE OSI_BIT(20) -#define MGBE_MAC_LPI_CSR_LPITXA OSI_BIT(19) -#define MGBE_MAC_LPI_CSR_PLS OSI_BIT(17) -#define MGBE_MAC_LPI_CSR_LPIEN OSI_BIT(16) -#define MGBE_MAC_RSS_CTRL_RSSE OSI_BIT(0) -#define MGBE_MAC_RSS_CTRL_IP2TE OSI_BIT(1) -#define MGBE_MAC_RSS_CTRL_TCP4TE OSI_BIT(2) -#define MGBE_MAC_RSS_CTRL_UDP4TE OSI_BIT(3) -#define MGBE_MAC_RSS_ADDR_ADDRT OSI_BIT(2) -#define MGBE_MAC_RSS_ADDR_RSSIA_SHIFT 8U -#define MGBE_MAC_RSS_ADDR_OB OSI_BIT(0) -#define MGBE_MAC_RSS_ADDR_CT OSI_BIT(1) -#define MGBE_MAC_TX_TJT OSI_BIT(0) -#define MGBE_MAC_TX_IHE OSI_BIT(12) -#define MGBE_MAC_TX_PCE OSI_BIT(13) +#define MGBE_MAC_ISR_LSI OSI_BIT(0) +#define MGBE_MAC_ISR_LS_MASK (OSI_BIT(25) | OSI_BIT(24)) +#define MGBE_MAC_ISR_LS_LOCAL_FAULT OSI_BIT(25) +#define MGBE_MAC_ISR_LS_LINK_OK 0U /* DMA SBUS */ #define MGBE_DMA_SBUS_UNDEF OSI_BIT(0) #define MGBE_DMA_SBUS_BLEN256 OSI_BIT(7) @@ -561,8 +555,6 @@ #define 
MGBE_DMA_SBUS_WR_OSR_LMT 0x3F000000U #define MGBE_DMA_TX_EDMA_CTRL_TDPS 0x00000005U #define MGBE_DMA_RX_EDMA_CTRL_RDPS 0x00000005U -#define MGBE_DMA_TX_EDMA_CTRL_TDPS_PRESI 0x00000003U -#define MGBE_DMA_RX_EDMA_CTRL_RDPS_PRESI 0x00000003U #define MGBE_MAC_TMCR_SS_2_5G (OSI_BIT(31) | OSI_BIT(30)) #define MGBE_MAC_TMCR_SS_5G (OSI_BIT(31) | OSI_BIT(29)) #define MGBE_MAC_TMCR_SS_10G (OSI_BIT(31) | OSI_BIT(30) | OSI_BIT(29)) @@ -573,197 +565,55 @@ #define MGBE_RXQ_TO_DMA_CHAN_MAP0 0x03020100U #define MGBE_RXQ_TO_DMA_CHAN_MAP1 0x07060504U #define MGBE_RXQ_TO_DMA_CHAN_MAP2 0x0B0A0908U -#define MGBE_RXQ_TO_DMA_CHAN_MAP3 0x0F0E0D0CU #define MGBE_RXQ_TO_DMA_MAP_DDMACH 0x80808080U #define MGBE_MTL_TXQ_SIZE_SHIFT 16U #define MGBE_MTL_RXQ_SIZE_SHIFT 16U #define MGBE_MAC_RMCR_GPSL_MSK 0x3FFF0000U -#define MGBE_MTL_RXQ_OP_MODE_FEP OSI_BIT(4) -#define MGBE_MAC_TCR_TSCFUPDT OSI_BIT(1) -#define MGBE_MAC_TCR_TSINIT OSI_BIT(2) #define MGBE_MAC_TCR_TSUPDT OSI_BIT(3) -#define MGBE_MAC_TCR_TSADDREG OSI_BIT(5) -#define MGBE_MAC_TCR_TSCTRLSSR OSI_BIT(9) -#define MGBE_MAC_TCR_TSENMACADDR OSI_BIT(18) -#define MGBE_MAC_TCR_SNAPTYPSEL_SHIFT 16U #define MGBE_MAC_STNSUR_ADDSUB_SHIFT 31U -#define MGBE_MAC_SSIR_SSINC_SHIFT 16U -#define MGBE_MAC_STNSR_TSSS_MASK 0x7FFFFFFFU -#define MGBE_MAC_PTO_CR_PTOEN OSI_BIT(0) -#define MGBE_MAC_PTO_CR_ASYNCEN OSI_BIT(1) -#define MGBE_MAC_PTO_CR_APDREQEN OSI_BIT(2) -#define MGBE_MAC_PTO_CR_DN (OSI_BIT(15) | OSI_BIT(14) | \ - OSI_BIT(13) | OSI_BIT(12) | \ - OSI_BIT(11) | OSI_BIT(10) | \ - OSI_BIT(9) | OSI_BIT(8)) -#define MGBE_MAC_PTO_CR_DN_SHIFT 8U -#define MGBE_MAC_PIDR_PID_MASK 0XFFFFU -#define MGBE_MAC_QX_TX_FLW_CTRL_TFE OSI_BIT(1) -#define MGBE_MAC_RX_FLW_CTRL_RFE OSI_BIT(0) -#define MGBE_MAC_PAUSE_TIME 0xFFFF0000U -#define MGBE_MAC_PAUSE_TIME_MASK 0xFFFF0000U #define MGBE_MTL_RXQ_OP_MODE_EHFC OSI_BIT(7) #define MGBE_MTL_RXQ_OP_MODE_RFA_SHIFT 1U #define MGBE_MTL_RXQ_OP_MODE_RFA_MASK 0x0000007EU #define MGBE_MTL_RXQ_OP_MODE_RFD_SHIFT 17U #define 
MGBE_MTL_RXQ_OP_MODE_RFD_MASK 0x007E0000U -/* MAC FPE control/statusOSI_BITmap */ -#define MGBE_MAC_FPE_CTS_EFPE OSI_BIT(0) -#define MGBE_MAC_FPE_CTS_TRSP OSI_BIT(19) -#define MGBE_MAC_FPE_CTS_TVER OSI_BIT(18) -#define MGBE_MAC_FPE_CTS_RRSP OSI_BIT(17) -#define MGBE_MAC_FPE_CTS_RVER OSI_BIT(16) -#define MGBE_MAC_FPE_CTS_SVER OSI_BIT(1) -#define MGBE_MAC_FPE_CTS_SRSP OSI_BIT(2) -/* MTL_FPE_CTRL_STS */ -#define MGBE_MTL_FPE_CTS_PEC (OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10) | OSI_BIT(11) | \ - OSI_BIT(12) | OSI_BIT(13) | \ - OSI_BIT(14) | OSI_BIT(15)) -#define MGBE_MTL_FPE_CTS_PEC_SHIFT 8U -#define MGBE_MTL_FPE_CTS_PEC_MAX_SHIFT 16U -/* MTL FPE adv registers */ -#define MGBE_MTL_FPE_ADV_HADV_MASK (0xFFFFU) -#define MGBE_MTL_FPE_ADV_HADV_VAL 100U -/* MTL_EST_CONTROL */ -#define MGBE_MTL_EST_CONTROL_PTOV (OSI_BIT(23) | OSI_BIT(24) | \ - OSI_BIT(25) | OSI_BIT(26) | \ - OSI_BIT(27) | OSI_BIT(28) | \ - OSI_BIT(29) | OSI_BIT(30) | \ - OSI_BIT(31)) -#define MGBE_MTL_EST_CONTROL_PTOV_SHIFT 23U -#define MGBE_MTL_EST_PTOV_RECOMMEND 32U -#define MGBE_MTL_EST_CONTROL_CTOV (OSI_BIT(11) | OSI_BIT(12) | \ - OSI_BIT(13) | OSI_BIT(14) | \ - OSI_BIT(15) | OSI_BIT(16) | \ - OSI_BIT(17) | OSI_BIT(18) | \ - OSI_BIT(19) | OSI_BIT(20) | \ - OSI_BIT(21) | OSI_BIT(22)) -#define MGBE_MTL_EST_CONTROL_CTOV_SHIFT 11U -#define MGBE_MTL_EST_CTOV_RECOMMEND 42U -#define MGBE_8PTP_CYCLE 26U -#ifdef MACSEC_SUPPORT +#if defined(MACSEC_SUPPORT) && !defined(OSI_STRIPPED_LIB) /** * MACSEC Recommended value * By default PCS and UPHY are present */ #define MGBE_MTL_EST_CTOV_MACSEC_RECOMMEND 295U #endif /* MACSEC_SUPPORT */ -#define MGBE_MTL_EST_CONTROL_TILS (OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10)) #define MGBE_MTL_EST_CONTROL_LCSE (OSI_BIT(7) | OSI_BIT(6)) #define MGBE_MTL_EST_CONTROL_LCSE_VAL 0U -#define MGBE_MTL_EST_CONTROL_LCSE_SHIFT 6U #define MGBE_MTL_EST_CONTROL_DDBF OSI_BIT(4) -#define MGBE_MTL_EST_CONTROL_SSWL OSI_BIT(1) #define MGBE_MTL_EST_OVERHEAD_OVHD (OSI_BIT(0) | OSI_BIT(1) | \ OSI_BIT(2) | 
OSI_BIT(3) | \ OSI_BIT(4) | OSI_BIT(5)) #define MGBE_MTL_EST_OVERHEAD_RECOMMEND 56U -/* EST controlOSI_BITmap */ -#define MGBE_MTL_EST_EEST OSI_BIT(0) -#define MGBE_MTL_EST_SSWL OSI_BIT(1) -#define MGBE_MTL_EST_QHLBF OSI_BIT(3) /* EST GCL controlOSI_BITmap */ #define MGBE_MTL_EST_ADDR_SHIFT 8 -#define MGBE_MTL_EST_ADDR_MASK (OSI_BIT(8) | OSI_BIT(9) | \ - OSI_BIT(10) | OSI_BIT(11) | \ - OSI_BIT(12) | OSI_BIT(13) | \ - OSI_BIT(14) | OSI_BIT(15) | \ - OSI_BIT(16) | OSI_BIT(17) | \ - OSI_BIT(18) | OSI_BIT(19)) -#define MGBE_MTL_EST_SRWO OSI_BIT(0) -#define MGBE_MTL_EST_GCRR OSI_BIT(2) -#define MGBE_MTL_EST_ERR0 OSI_BIT(20) /* EST GCRA addresses */ -#define MGBE_MTL_EST_BTR_LOW ((unsigned int)0x0 << \ +#define MGBE_MTL_EST_BTR_LOW ((nveu32_t)0x0 << \ MGBE_MTL_EST_ADDR_SHIFT) -#define MGBE_MTL_EST_BTR_HIGH ((unsigned int)0x1 << \ +#define MGBE_MTL_EST_BTR_HIGH ((nveu32_t)0x1 << \ MGBE_MTL_EST_ADDR_SHIFT) -#define MGBE_MTL_EST_CTR_LOW ((unsigned int)0x2 << \ +#define MGBE_MTL_EST_CTR_LOW ((nveu32_t)0x2 << \ MGBE_MTL_EST_ADDR_SHIFT) -#define MGBE_MTL_EST_CTR_HIGH ((unsigned int)0x3 << \ +#define MGBE_MTL_EST_CTR_HIGH ((nveu32_t)0x3 << \ MGBE_MTL_EST_ADDR_SHIFT) -#define MGBE_MTL_EST_CTR_HIGH_MAX 0xFFU -#define MGBE_MTL_EST_TER ((unsigned int)0x4 << \ +#define MGBE_MTL_EST_TER ((nveu32_t)0x4 << \ MGBE_MTL_EST_ADDR_SHIFT) -#define MGBE_MTL_EST_LLR ((unsigned int)0x5 << \ +#define MGBE_MTL_EST_LLR ((nveu32_t)0x5 << \ MGBE_MTL_EST_ADDR_SHIFT) /*EST MTL interrupt STATUS and ERR*/ #define MGBE_MTL_IS_ESTIS OSI_BIT(18) -/* MTL_EST_STATUS*/ -#define MGBE_MTL_EST_STATUS_CGCE OSI_BIT(4) -#define MGBE_MTL_EST_STATUS_HLBS OSI_BIT(3) -#define MGBE_MTL_EST_STATUS_HLBF OSI_BIT(2) -#define MGBE_MTL_EST_STATUS_BTRE OSI_BIT(1) -#define MGBE_MTL_EST_STATUS_SWLC OSI_BIT(0) -#define MGBE_MTL_EST_ITRE_CGCE OSI_BIT(4) -#define MGBE_MTL_EST_ITRE_IEHS OSI_BIT(3) -#define MGBE_MTL_EST_ITRE_IEHF OSI_BIT(2) -#define MGBE_MTL_EST_ITRE_IEBE OSI_BIT(1) -#define MGBE_MTL_EST_ITRE_IECC OSI_BIT(0) #define 
MGBE_MAC_EXT_CNF_DDS OSI_BIT(7) -#define MGBE_MAC_EXT_CNF_EIPG 0x1U -#define MGBE_MAC_EXT_CNF_EIPG_MASK 0x7FU /* TX timestamp */ #define MGBE_MAC_TSS_TXTSC OSI_BIT(15) -#define MGBE0_SID ((nveu32_t)0x6U) -#define MGBE1_SID ((nveu32_t)0x49U) -#define MGBE2_SID ((nveu32_t)0x4AU) -#define MGBE3_SID ((nveu32_t)0x4BU) -#define MGBE_SID_VAL1(x) (((x) << 24U) |\ - ((x) << 16U) |\ - ((x) << 8U) |\ - (x)) -#define MGBE_SID_VAL2(x) (((x) << 8U) |\ - (x)) -/** @} */ - -/** - * @addtogroup MGBE-QUEUE QUEUE fifo size programmable values - * - * @brief Queue FIFO size programmable values - * @{ - */ -/* Formula is "Programmed value = (x + 1 )*256" - * Total Rx buf size is 192KB so 192*1024 = (x + 1)*256 - * which gives x as 0x2FF - */ -#define MGBE_19K 0x4BU /* For Ten MTL queues */ -#define MGBE_21K 0x53U /* For Nine MTL queues */ -#define MGBE_24K 0x5FU /* For Eight MTL queues */ -#define MGBE_27K 0x6BU /* For Seven MTL queues */ -#define MGBE_32K 0x7FU /* For Six MTL queues */ -#define MGBE_38K 0x97U /* For Five MTL queues */ -#define MGBE_48K 0xBFU /* For Four MTL queues */ -#define MGBE_64K 0xFFU /* For Three MTL queues */ -#define MGBE_96K 0x17FU /* For Two MTL queues */ -#define MGBE_192K 0x2FFU /* For One MTL queue */ -/** @} */ - -/** - * @addtogroup MGBE-SIZE SIZE calculation helper Macros - * - * @brief SIZE calculation defines - * @{ - */ -#define FIFO_SIZE_B(x) (x) -#define FIFO_SIZE_KB(x) ((x) * 1024U) -/** @} */ - -/** - * @addtogroup MGBE-QSIZE Queue SIZE Mapping Macros - * - * @brief Tx and Rx Queue SIZE Mapping defines - * @{ - */ -#define MGBE_TX_FIFO_SIZE_64KB 9U -#define MGBE_RX_FIFO_SIZE_64KB 9U -#define MGBE_TX_FIFO_SIZE_128KB 10U -#define MGBE_RX_FIFO_SIZE_192KB 12U /** @} */ +#ifndef OSI_STRIPPED_LIB /** * @addtogroup MGBE-HW-BACKUP * @@ -877,18 +727,22 @@ OSI_MGBE_MAX_L3_L4_FILTER + (x))) /* x varies from 0-31, 32 VLAN tag filters total */ -#define MGBE_MAC_VLAN_BAK_IDX(x) ((MGBE_MAC_L3_AD3R_BAK_IDX(0) + \ +#define MGBE_MAC_VLAN_BAK_IDX(x) 
((MGBE_MAC_L3_AD3R_BAK_IDX(0U) + \ OSI_MGBE_MAX_L3_L4_FILTER + (x))) /* Add MAC_DChSel_IndReg */ -#define MGBE_MAC_DCHSEL_BAK_IDX(x) ((MGBE_MAC_VLAN_BAK_IDX(0) + \ +#define MGBE_MAC_DCHSEL_BAK_IDX(x) ((MGBE_MAC_VLAN_BAK_IDX(0U) + \ MGBE_MAX_VLAN_FILTER + 1U)) -#define MGBE_MAX_BAK_IDX ((MGBE_MAC_DCHSEL_BAK_IDX(0) + \ +#define MGBE_MAX_BAK_IDX ((MGBE_MAC_DCHSEL_BAK_IDX(0U) + \ OSI_MGBE_MAX_MAC_ADDRESS_FILTER + 1U)) /** @} */ +#endif /* !OSI_STRIPPED_LIB */ + +/* TXQ Size 128KB is divided equally across 10 MTL Queues*/ +#define TX_FIFO_SZ (((((128U * 1024U)/OSI_MGBE_MAX_NUM_QUEUES)) / 256U) - 1U) /** - * @addtogroup MGBE-MAC MGBE MAC HW feature registers + * @addtogroup MGBE-MAC-HWFR MGBE MAC HW feature registers * * @brief Helps in identifying the features that are set in MAC HW * @{ @@ -962,7 +816,6 @@ #define MGBE_MAC_HFR0_TSSTSSEL_MASK 0x3U #define MGBE_MAC_HFR0_TSSTSSEL_SHIFT 25U -#define MGBE_MAC_HFR0_SAVLANINS_MASK 0x1U #define MGBE_MAC_HFR0_SAVLANINS_SHIFT 27U #define MGBE_MAC_HFR0_VXN_MASK 0x1U @@ -1134,7 +987,6 @@ #define MGBE_MTL_ECC_TSOED OSI_BIT(4) #define MGBE_MTL_ECC_DESCED OSI_BIT(5) #define MGBE_MAC_FSM_CONTROL 0x158U -#define MGBE_TMOUTEN OSI_BIT(0) #define MGBE_PRTYEN OSI_BIT(1) #define MGBE_MAC_DPP_FSM_INTERRUPT_STATUS 0x150U #define MGBE_MTL_DPP_CONTROL 0x10E0U diff --git a/kernel/nvethernetrm/osi/core/mgbe_mmc.c b/kernel/nvethernetrm/osi/core/mgbe_mmc.c index 75ed121727..348c11aafc 100644 --- a/kernel/nvethernetrm/osi/core/mgbe_mmc.c +++ b/kernel/nvethernetrm/osi/core/mgbe_mmc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -27,7 +27,7 @@ #include "mgbe_core.h" /** - * @brief update_mmc_val - function to read register and return value to callee + * @brief mgbe_update_mmc_val - function to read register and return value to callee * * Algorithm: Read the registers, check for boundary, if more, reset * counters else return same to caller. @@ -43,12 +43,12 @@ * @retval 0 on MMC counters overflow * @retval value on current MMC counter value. */ -static inline unsigned long update_mmc_val(struct osi_core_priv_data *osi_core, - unsigned long last_value, - unsigned long offset) +static inline nveu64_t mgbe_update_mmc_val(struct osi_core_priv_data *osi_core, + nveu64_t last_value, + nveu64_t offset) { - unsigned long temp; - unsigned int value = osi_readl((unsigned char *)osi_core->base + + nveu64_t temp = 0; + nveu32_t value = osi_readl((nveu8_t *)osi_core->base + offset); temp = last_value + value; @@ -56,13 +56,11 @@ static inline unsigned long update_mmc_val(struct osi_core_priv_data *osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_OUTOFBOUND, "Value overflow resetting all counters\n", - (unsigned long long)offset); + (nveul64_t)offset); mgbe_reset_mmc(osi_core); - } else { - return temp; } - return 0; + return temp; } /** @@ -75,14 +73,14 @@ static inline unsigned long update_mmc_val(struct osi_core_priv_data *osi_core, * 1) MAC should be init and started. 
see osi_start_mac() * 2) osi_core->osd should be populated */ -void mgbe_reset_mmc(struct osi_core_priv_data *osi_core) +void mgbe_reset_mmc(struct osi_core_priv_data *const osi_core) { - unsigned int value; + nveu32_t value; - value = osi_readl((unsigned char *)osi_core->base + MGBE_MMC_CNTRL); + value = osi_readl((nveu8_t *)osi_core->base + MGBE_MMC_CNTRL); /* self-clear bit in one clock cycle */ value |= MGBE_MMC_CNTRL_CNTRST; - osi_writel(value, (unsigned char *)osi_core->base + MGBE_MMC_CNTRL); + osi_writel(value, (nveu8_t *)osi_core->base + MGBE_MMC_CNTRL); osi_memset(&osi_core->mmc, 0U, sizeof(struct osi_mmc_counters)); } @@ -99,461 +97,461 @@ void mgbe_reset_mmc(struct osi_core_priv_data *osi_core) * 1) MAC should be init and started. see osi_start_mac() * 2) osi_core->osd should be populated */ -void mgbe_read_mmc(struct osi_core_priv_data *osi_core) +void mgbe_read_mmc(struct osi_core_priv_data *const osi_core) { struct osi_mmc_counters *mmc = &osi_core->mmc; mmc->mmc_tx_octetcount_gb = - update_mmc_val(osi_core, mmc->mmc_tx_octetcount_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_octetcount_gb, MMC_TXOCTETCOUNT_GB_L); mmc->mmc_tx_octetcount_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_octetcount_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_octetcount_gb_h, MMC_TXOCTETCOUNT_GB_H); mmc->mmc_tx_framecount_gb = - update_mmc_val(osi_core, mmc->mmc_tx_framecount_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_framecount_gb, MMC_TXPACKETCOUNT_GB_L); mmc->mmc_tx_framecount_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_framecount_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_framecount_gb_h, MMC_TXPACKETCOUNT_GB_H); mmc->mmc_tx_broadcastframe_g = - update_mmc_val(osi_core, mmc->mmc_tx_broadcastframe_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_broadcastframe_g, MMC_TXBROADCASTPACKETS_G_L); mmc->mmc_tx_broadcastframe_g_h = - update_mmc_val(osi_core, mmc->mmc_tx_broadcastframe_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_broadcastframe_g_h, 
MMC_TXBROADCASTPACKETS_G_H); mmc->mmc_tx_multicastframe_g = - update_mmc_val(osi_core, mmc->mmc_tx_multicastframe_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicastframe_g, MMC_TXMULTICASTPACKETS_G_L); mmc->mmc_tx_multicastframe_g_h = - update_mmc_val(osi_core, mmc->mmc_tx_multicastframe_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicastframe_g_h, MMC_TXMULTICASTPACKETS_G_H); mmc->mmc_tx_64_octets_gb = - update_mmc_val(osi_core, mmc->mmc_tx_64_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_64_octets_gb, MMC_TX64OCTETS_GB_L); mmc->mmc_tx_64_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_64_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_64_octets_gb_h, MMC_TX64OCTETS_GB_H); mmc->mmc_tx_65_to_127_octets_gb = - update_mmc_val(osi_core, mmc->mmc_tx_65_to_127_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_65_to_127_octets_gb, MMC_TX65TO127OCTETS_GB_L); mmc->mmc_tx_65_to_127_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_65_to_127_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_65_to_127_octets_gb_h, MMC_TX65TO127OCTETS_GB_H); mmc->mmc_tx_128_to_255_octets_gb = - update_mmc_val(osi_core, mmc->mmc_tx_128_to_255_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_128_to_255_octets_gb, MMC_TX128TO255OCTETS_GB_L); mmc->mmc_tx_128_to_255_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_128_to_255_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_128_to_255_octets_gb_h, MMC_TX128TO255OCTETS_GB_H); mmc->mmc_tx_256_to_511_octets_gb = - update_mmc_val(osi_core, mmc->mmc_tx_256_to_511_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_256_to_511_octets_gb, MMC_TX256TO511OCTETS_GB_L); mmc->mmc_tx_256_to_511_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_256_to_511_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_256_to_511_octets_gb_h, MMC_TX256TO511OCTETS_GB_H); mmc->mmc_tx_512_to_1023_octets_gb = - update_mmc_val(osi_core, mmc->mmc_tx_512_to_1023_octets_gb, + mgbe_update_mmc_val(osi_core, 
mmc->mmc_tx_512_to_1023_octets_gb, MMC_TX512TO1023OCTETS_GB_L); mmc->mmc_tx_512_to_1023_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_512_to_1023_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_512_to_1023_octets_gb_h, MMC_TX512TO1023OCTETS_GB_H); mmc->mmc_tx_1024_to_max_octets_gb = - update_mmc_val(osi_core, mmc->mmc_tx_1024_to_max_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_1024_to_max_octets_gb, MMC_TX1024TOMAXOCTETS_GB_L); mmc->mmc_tx_1024_to_max_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_1024_to_max_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_1024_to_max_octets_gb_h, MMC_TX1024TOMAXOCTETS_GB_H); mmc->mmc_tx_unicast_gb = - update_mmc_val(osi_core, mmc->mmc_tx_unicast_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_unicast_gb, MMC_TXUNICASTPACKETS_GB_L); mmc->mmc_tx_unicast_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_unicast_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_unicast_gb_h, MMC_TXUNICASTPACKETS_GB_H); mmc->mmc_tx_multicast_gb = - update_mmc_val(osi_core, mmc->mmc_tx_multicast_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicast_gb, MMC_TXMULTICASTPACKETS_GB_L); mmc->mmc_tx_multicast_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_multicast_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicast_gb_h, MMC_TXMULTICASTPACKETS_GB_H); mmc->mmc_tx_broadcast_gb = - update_mmc_val(osi_core, mmc->mmc_tx_broadcast_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_broadcast_gb, MMC_TXBROADCASTPACKETS_GB_L); mmc->mmc_tx_broadcast_gb_h = - update_mmc_val(osi_core, mmc->mmc_tx_broadcast_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_broadcast_gb_h, MMC_TXBROADCASTPACKETS_GB_H); mmc->mmc_tx_underflow_error = - update_mmc_val(osi_core, mmc->mmc_tx_underflow_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_underflow_error, MMC_TXUNDERFLOWERROR_L); mmc->mmc_tx_underflow_error_h = - update_mmc_val(osi_core, mmc->mmc_tx_underflow_error_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_underflow_error_h, 
MMC_TXUNDERFLOWERROR_H); mmc->mmc_tx_singlecol_g = - update_mmc_val(osi_core, mmc->mmc_tx_singlecol_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_singlecol_g, MMC_TXSINGLECOL_G); mmc->mmc_tx_multicol_g = - update_mmc_val(osi_core, mmc->mmc_tx_multicol_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_multicol_g, MMC_TXMULTICOL_G); mmc->mmc_tx_deferred = - update_mmc_val(osi_core, mmc->mmc_tx_deferred, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_deferred, MMC_TXDEFERRED); mmc->mmc_tx_latecol = - update_mmc_val(osi_core, mmc->mmc_tx_latecol, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_latecol, MMC_TXLATECOL); mmc->mmc_tx_exesscol = - update_mmc_val(osi_core, mmc->mmc_tx_exesscol, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_exesscol, MMC_TXEXESSCOL); mmc->mmc_tx_carrier_error = - update_mmc_val(osi_core, mmc->mmc_tx_carrier_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_carrier_error, MMC_TXCARRIERERROR); mmc->mmc_tx_octetcount_g = - update_mmc_val(osi_core, mmc->mmc_tx_octetcount_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_octetcount_g, MMC_TXOCTETCOUNT_G_L); mmc->mmc_tx_octetcount_g_h = - update_mmc_val(osi_core, mmc->mmc_tx_octetcount_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_octetcount_g_h, MMC_TXOCTETCOUNT_G_H); mmc->mmc_tx_framecount_g = - update_mmc_val(osi_core, mmc->mmc_tx_framecount_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_framecount_g, MMC_TXPACKETSCOUNT_G_L); mmc->mmc_tx_framecount_g_h = - update_mmc_val(osi_core, mmc->mmc_tx_framecount_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_framecount_g_h, MMC_TXPACKETSCOUNT_G_H); mmc->mmc_tx_excessdef = - update_mmc_val(osi_core, mmc->mmc_tx_excessdef, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_excessdef, MMC_TXEXECESS_DEFERRED); mmc->mmc_tx_pause_frame = - update_mmc_val(osi_core, mmc->mmc_tx_pause_frame, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_pause_frame, MMC_TXPAUSEPACKETS_L); mmc->mmc_tx_pause_frame_h = - update_mmc_val(osi_core, mmc->mmc_tx_pause_frame_h, + 
mgbe_update_mmc_val(osi_core, mmc->mmc_tx_pause_frame_h, MMC_TXPAUSEPACKETS_H); mmc->mmc_tx_vlan_frame_g = - update_mmc_val(osi_core, mmc->mmc_tx_vlan_frame_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_vlan_frame_g, MMC_TXVLANPACKETS_G_L); mmc->mmc_tx_vlan_frame_g_h = - update_mmc_val(osi_core, mmc->mmc_tx_vlan_frame_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_vlan_frame_g_h, MMC_TXVLANPACKETS_G_H); mmc->mmc_rx_framecount_gb = - update_mmc_val(osi_core, mmc->mmc_rx_framecount_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_framecount_gb, MMC_RXPACKETCOUNT_GB_L); mmc->mmc_rx_framecount_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_framecount_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_framecount_gb_h, MMC_RXPACKETCOUNT_GB_H); mmc->mmc_rx_octetcount_gb = - update_mmc_val(osi_core, mmc->mmc_rx_octetcount_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_octetcount_gb, MMC_RXOCTETCOUNT_GB_L); mmc->mmc_rx_octetcount_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_octetcount_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_octetcount_gb_h, MMC_RXOCTETCOUNT_GB_H); mmc->mmc_rx_octetcount_g = - update_mmc_val(osi_core, mmc->mmc_rx_octetcount_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_octetcount_g, MMC_RXOCTETCOUNT_G_L); mmc->mmc_rx_octetcount_g_h = - update_mmc_val(osi_core, mmc->mmc_rx_octetcount_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_octetcount_g_h, MMC_RXOCTETCOUNT_G_H); mmc->mmc_rx_broadcastframe_g = - update_mmc_val(osi_core, mmc->mmc_rx_broadcastframe_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_broadcastframe_g, MMC_RXBROADCASTPACKETS_G_L); mmc->mmc_rx_broadcastframe_g_h = - update_mmc_val(osi_core, mmc->mmc_rx_broadcastframe_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_broadcastframe_g_h, MMC_RXBROADCASTPACKETS_G_H); mmc->mmc_rx_multicastframe_g = - update_mmc_val(osi_core, mmc->mmc_rx_multicastframe_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_multicastframe_g, MMC_RXMULTICASTPACKETS_G_L); mmc->mmc_rx_multicastframe_g_h = - 
update_mmc_val(osi_core, mmc->mmc_rx_multicastframe_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_multicastframe_g_h, MMC_RXMULTICASTPACKETS_G_H); mmc->mmc_rx_crc_error = - update_mmc_val(osi_core, mmc->mmc_rx_crc_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_crc_error, MMC_RXCRCERROR_L); mmc->mmc_rx_crc_error_h = - update_mmc_val(osi_core, mmc->mmc_rx_crc_error_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_crc_error_h, MMC_RXCRCERROR_H); mmc->mmc_rx_align_error = - update_mmc_val(osi_core, mmc->mmc_rx_align_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_align_error, MMC_RXALIGNMENTERROR); mmc->mmc_rx_runt_error = - update_mmc_val(osi_core, mmc->mmc_rx_runt_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_runt_error, MMC_RXRUNTERROR); mmc->mmc_rx_jabber_error = - update_mmc_val(osi_core, mmc->mmc_rx_jabber_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_jabber_error, MMC_RXJABBERERROR); mmc->mmc_rx_undersize_g = - update_mmc_val(osi_core, mmc->mmc_rx_undersize_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_undersize_g, MMC_RXUNDERSIZE_G); mmc->mmc_rx_oversize_g = - update_mmc_val(osi_core, mmc->mmc_rx_oversize_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_oversize_g, MMC_RXOVERSIZE_G); mmc->mmc_rx_64_octets_gb = - update_mmc_val(osi_core, mmc->mmc_rx_64_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_64_octets_gb, MMC_RX64OCTETS_GB_L); mmc->mmc_rx_64_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_64_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_64_octets_gb_h, MMC_RX64OCTETS_GB_H); mmc->mmc_rx_65_to_127_octets_gb = - update_mmc_val(osi_core, mmc->mmc_rx_65_to_127_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_65_to_127_octets_gb, MMC_RX65TO127OCTETS_GB_L); mmc->mmc_rx_65_to_127_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_65_to_127_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_65_to_127_octets_gb_h, MMC_RX65TO127OCTETS_GB_H); mmc->mmc_rx_128_to_255_octets_gb = - update_mmc_val(osi_core, 
mmc->mmc_rx_128_to_255_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_128_to_255_octets_gb, MMC_RX128TO255OCTETS_GB_L); mmc->mmc_rx_128_to_255_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_128_to_255_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_128_to_255_octets_gb_h, MMC_RX128TO255OCTETS_GB_H); mmc->mmc_rx_256_to_511_octets_gb = - update_mmc_val(osi_core, mmc->mmc_rx_256_to_511_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_256_to_511_octets_gb, MMC_RX256TO511OCTETS_GB_L); mmc->mmc_rx_256_to_511_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_256_to_511_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_256_to_511_octets_gb_h, MMC_RX256TO511OCTETS_GB_H); mmc->mmc_rx_512_to_1023_octets_gb = - update_mmc_val(osi_core, mmc->mmc_rx_512_to_1023_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_512_to_1023_octets_gb, MMC_RX512TO1023OCTETS_GB_L); mmc->mmc_rx_512_to_1023_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_512_to_1023_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_512_to_1023_octets_gb_h, MMC_RX512TO1023OCTETS_GB_H); mmc->mmc_rx_1024_to_max_octets_gb = - update_mmc_val(osi_core, mmc->mmc_rx_1024_to_max_octets_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_1024_to_max_octets_gb, MMC_RX1024TOMAXOCTETS_GB_L); mmc->mmc_rx_1024_to_max_octets_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_1024_to_max_octets_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_1024_to_max_octets_gb_h, MMC_RX1024TOMAXOCTETS_GB_H); mmc->mmc_rx_unicast_g = - update_mmc_val(osi_core, mmc->mmc_rx_unicast_g, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_unicast_g, MMC_RXUNICASTPACKETS_G_L); mmc->mmc_rx_unicast_g_h = - update_mmc_val(osi_core, mmc->mmc_rx_unicast_g_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_unicast_g_h, MMC_RXUNICASTPACKETS_G_H); mmc->mmc_rx_length_error = - update_mmc_val(osi_core, mmc->mmc_rx_length_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_length_error, MMC_RXLENGTHERROR_L); 
mmc->mmc_rx_length_error_h = - update_mmc_val(osi_core, mmc->mmc_rx_length_error_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_length_error_h, MMC_RXLENGTHERROR_H); mmc->mmc_rx_outofrangetype = - update_mmc_val(osi_core, mmc->mmc_rx_outofrangetype, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_outofrangetype, MMC_RXOUTOFRANGETYPE_L); mmc->mmc_rx_outofrangetype_h = - update_mmc_val(osi_core, mmc->mmc_rx_outofrangetype_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_outofrangetype_h, MMC_RXOUTOFRANGETYPE_H); mmc->mmc_rx_pause_frames = - update_mmc_val(osi_core, mmc->mmc_rx_pause_frames, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_pause_frames, MMC_RXPAUSEPACKETS_L); mmc->mmc_rx_pause_frames_h = - update_mmc_val(osi_core, mmc->mmc_rx_pause_frames_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_pause_frames_h, MMC_RXPAUSEPACKETS_H); mmc->mmc_rx_fifo_overflow = - update_mmc_val(osi_core, mmc->mmc_rx_fifo_overflow, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_fifo_overflow, MMC_RXFIFOOVERFLOW_L); mmc->mmc_rx_fifo_overflow_h = - update_mmc_val(osi_core, mmc->mmc_rx_fifo_overflow_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_fifo_overflow_h, MMC_RXFIFOOVERFLOW_H); mmc->mmc_rx_vlan_frames_gb = - update_mmc_val(osi_core, mmc->mmc_rx_vlan_frames_gb, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_vlan_frames_gb, MMC_RXVLANPACKETS_GB_L); mmc->mmc_rx_vlan_frames_gb_h = - update_mmc_val(osi_core, mmc->mmc_rx_vlan_frames_gb_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_vlan_frames_gb_h, MMC_RXVLANPACKETS_GB_H); mmc->mmc_rx_watchdog_error = - update_mmc_val(osi_core, mmc->mmc_rx_watchdog_error, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_watchdog_error, MMC_RXWATCHDOGERROR); mmc->mmc_tx_lpi_usec_cntr = - update_mmc_val(osi_core, mmc->mmc_tx_lpi_usec_cntr, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_lpi_usec_cntr, MMC_TXLPIUSECCNTR); mmc->mmc_tx_lpi_tran_cntr = - update_mmc_val(osi_core, mmc->mmc_tx_lpi_tran_cntr, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_lpi_tran_cntr, 
MMC_TXLPITRANCNTR); mmc->mmc_rx_lpi_usec_cntr = - update_mmc_val(osi_core, mmc->mmc_rx_lpi_usec_cntr, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_lpi_usec_cntr, MMC_RXLPIUSECCNTR); mmc->mmc_rx_lpi_tran_cntr = - update_mmc_val(osi_core, mmc->mmc_rx_lpi_tran_cntr, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_lpi_tran_cntr, MMC_RXLPITRANCNTR); mmc->mmc_rx_ipv4_gd = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd, MMC_RXIPV4_GD_PKTS_L); mmc->mmc_rx_ipv4_gd_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_h, MMC_RXIPV4_GD_PKTS_H); mmc->mmc_rx_ipv4_hderr = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr, MMC_RXIPV4_HDRERR_PKTS_L); mmc->mmc_rx_ipv4_hderr_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_h, MMC_RXIPV4_HDRERR_PKTS_H); mmc->mmc_rx_ipv4_nopay = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay, MMC_RXIPV4_NOPAY_PKTS_L); mmc->mmc_rx_ipv4_nopay_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_h, MMC_RXIPV4_NOPAY_PKTS_H); mmc->mmc_rx_ipv4_frag = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag, MMC_RXIPV4_FRAG_PKTS_L); mmc->mmc_rx_ipv4_frag_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_h, MMC_RXIPV4_FRAG_PKTS_H); mmc->mmc_rx_ipv4_udsbl = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl, MMC_RXIPV4_UBSBL_PKTS_L); mmc->mmc_rx_ipv4_udsbl_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_h, MMC_RXIPV4_UBSBL_PKTS_H); mmc->mmc_rx_ipv6_gd = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd, + 
mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd, MMC_RXIPV6_GD_PKTS_L); mmc->mmc_rx_ipv6_gd_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_h, MMC_RXIPV6_GD_PKTS_H); mmc->mmc_rx_ipv6_hderr = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr, MMC_RXIPV6_HDRERR_PKTS_L); mmc->mmc_rx_ipv6_hderr_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_h, MMC_RXIPV6_HDRERR_PKTS_H); mmc->mmc_rx_ipv6_nopay = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay, MMC_RXIPV6_NOPAY_PKTS_L); mmc->mmc_rx_ipv6_nopay_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_h, MMC_RXIPV6_NOPAY_PKTS_H); mmc->mmc_rx_udp_gd = - update_mmc_val(osi_core, mmc->mmc_rx_udp_gd, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_gd, MMC_RXUDP_GD_PKTS_L); mmc->mmc_rx_udp_gd_h = - update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_h, MMC_RXUDP_GD_PKTS_H); mmc->mmc_rx_udp_err = - update_mmc_val(osi_core, mmc->mmc_rx_udp_err, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_err, MMC_RXUDP_ERR_PKTS_L); mmc->mmc_rx_udp_err_h = - update_mmc_val(osi_core, mmc->mmc_rx_udp_err_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_err_h, MMC_RXUDP_ERR_PKTS_H); mmc->mmc_rx_tcp_gd = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd, MMC_RXTCP_GD_PKTS_L); mmc->mmc_rx_tcp_gd_h = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_h, MMC_RXTCP_GD_PKTS_H); mmc->mmc_rx_tcp_err = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_err, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_err, MMC_RXTCP_ERR_PKTS_L); mmc->mmc_rx_tcp_err_h = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_h, + 
mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_h, MMC_RXTCP_ERR_PKTS_H); mmc->mmc_rx_icmp_gd = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd, MMC_RXICMP_GD_PKTS_L); mmc->mmc_rx_icmp_gd_h = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_h, MMC_RXICMP_GD_PKTS_H); mmc->mmc_rx_icmp_err = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_err, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_err, MMC_RXICMP_ERR_PKTS_L); mmc->mmc_rx_icmp_err_h = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_h, MMC_RXICMP_ERR_PKTS_H); mmc->mmc_rx_ipv4_gd_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_octets, MMC_RXIPV4_GD_OCTETS_L); mmc->mmc_rx_ipv4_gd_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_gd_octets_h, MMC_RXIPV4_GD_OCTETS_H); mmc->mmc_rx_ipv4_hderr_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_octets, MMC_RXIPV4_HDRERR_OCTETS_L); mmc->mmc_rx_ipv4_hderr_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_hderr_octets_h, MMC_RXIPV4_HDRERR_OCTETS_H); mmc->mmc_rx_ipv4_nopay_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_octets, MMC_RXIPV4_NOPAY_OCTETS_L); mmc->mmc_rx_ipv4_nopay_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_nopay_octets_h, MMC_RXIPV4_NOPAY_OCTETS_H); mmc->mmc_rx_ipv4_frag_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_octets, MMC_RXIPV4_FRAG_OCTETS_L); mmc->mmc_rx_ipv4_frag_octets_h = - update_mmc_val(osi_core, 
mmc->mmc_rx_ipv4_frag_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_frag_octets_h, MMC_RXIPV4_FRAG_OCTETS_H); mmc->mmc_rx_ipv4_udsbl_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_octets, MMC_RXIPV4_UDP_CHKSM_DIS_OCT_L); mmc->mmc_rx_ipv4_udsbl_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv4_udsbl_octets_h, MMC_RXIPV4_UDP_CHKSM_DIS_OCT_H); mmc->mmc_rx_udp_gd_octets = - update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_octets, MMC_RXUDP_GD_OCTETS_L); mmc->mmc_rx_udp_gd_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_gd_octets_h, MMC_RXUDP_GD_OCTETS_H); mmc->mmc_rx_ipv6_gd_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_octets, MMC_RXIPV6_GD_OCTETS_L); mmc->mmc_rx_ipv6_gd_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_gd_octets_h, MMC_RXIPV6_GD_OCTETS_H); mmc->mmc_rx_ipv6_hderr_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_octets, MMC_RXIPV6_HDRERR_OCTETS_L); mmc->mmc_rx_ipv6_hderr_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_hderr_octets_h, MMC_RXIPV6_HDRERR_OCTETS_H); mmc->mmc_rx_ipv6_nopay_octets = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_octets, MMC_RXIPV6_NOPAY_OCTETS_L); mmc->mmc_rx_ipv6_nopay_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_ipv6_nopay_octets_h, MMC_RXIPV6_NOPAY_OCTETS_H); mmc->mmc_rx_udp_err_octets = - update_mmc_val(osi_core, 
mmc->mmc_rx_udp_err_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_err_octets, MMC_RXUDP_ERR_OCTETS_L); mmc->mmc_rx_udp_err_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_udp_err_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_udp_err_octets_h, MMC_RXUDP_ERR_OCTETS_H); mmc->mmc_rx_tcp_gd_octets = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_octets, MMC_RXTCP_GD_OCTETS_L); mmc->mmc_rx_tcp_gd_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_gd_octets_h, MMC_RXTCP_GD_OCTETS_H); mmc->mmc_rx_tcp_err_octets = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_octets, MMC_RXTCP_ERR_OCTETS_L); mmc->mmc_rx_tcp_err_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_tcp_err_octets_h, MMC_RXTCP_ERR_OCTETS_H); mmc->mmc_rx_icmp_gd_octets = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_octets, MMC_RXICMP_GD_OCTETS_L); mmc->mmc_rx_icmp_gd_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_gd_octets_h, MMC_RXICMP_GD_OCTETS_H); mmc->mmc_rx_icmp_err_octets = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_octets, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_octets, MMC_RXICMP_ERR_OCTETS_L); mmc->mmc_rx_icmp_err_octets_h = - update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_octets_h, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_icmp_err_octets_h, MMC_RXICMP_ERR_OCTETS_H); mmc->mmc_tx_fpe_frag_cnt = - update_mmc_val(osi_core, mmc->mmc_tx_fpe_frag_cnt, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_fpe_frag_cnt, MMC_TX_FPE_FRAG_COUNTER); mmc->mmc_tx_fpe_hold_req_cnt = - update_mmc_val(osi_core, mmc->mmc_tx_fpe_hold_req_cnt, + mgbe_update_mmc_val(osi_core, mmc->mmc_tx_fpe_hold_req_cnt, 
MMC_TX_HOLD_REQ_COUNTER); mmc->mmc_rx_packet_reass_err_cnt = - update_mmc_val(osi_core, mmc->mmc_rx_packet_reass_err_cnt, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_packet_reass_err_cnt, MMC_RX_PKT_ASSEMBLY_ERR_CNTR); mmc->mmc_rx_packet_smd_err_cnt = - update_mmc_val(osi_core, mmc->mmc_rx_packet_smd_err_cnt, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_packet_smd_err_cnt, MMC_RX_PKT_SMD_ERR_CNTR); mmc->mmc_rx_packet_asm_ok_cnt = - update_mmc_val(osi_core, mmc->mmc_rx_packet_asm_ok_cnt, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_packet_asm_ok_cnt, MMC_RX_PKT_ASSEMBLY_OK_CNTR); mmc->mmc_rx_fpe_fragment_cnt = - update_mmc_val(osi_core, mmc->mmc_rx_fpe_fragment_cnt, + mgbe_update_mmc_val(osi_core, mmc->mmc_rx_fpe_fragment_cnt, MMC_RX_FPE_FRAG_CNTR); } diff --git a/kernel/nvethernetrm/osi/core/mgbe_mmc.h b/kernel/nvethernetrm/osi/core/mgbe_mmc.h index 957577d384..f904a43d5f 100644 --- a/kernel/nvethernetrm/osi/core/mgbe_mmc.h +++ b/kernel/nvethernetrm/osi/core/mgbe_mmc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -67,13 +67,6 @@ #define MMC_TXVLANPACKETS_G_H 0x008A0 #define MMC_TXLPIUSECCNTR 0x008A4 #define MMC_TXLPITRANCNTR 0x008A8 -#define MMC_PRIO_INT_STATUS 0x008CC -#define MMC_TX_PER_PRIO_STATUS 0x008D0 -#define MMC_TX_PER_PRIO_PKT_GB 0x008D4 -#define MMC_TX_PER_PRIO_PFC_PKT_GB 0x008D8 -#define MMC_TX_PER_PRIO_GPFC_PKT_GB 0x008DC -#define MMC_TX_PER_PRIO_OCTET_GB_L 0x008E0 -#define MMC_TX_PER_PRIO_OCTET_GB_H 0x008E4 #define MMC_RXPACKETCOUNT_GB_L 0x00900 #define MMC_RXPACKETCOUNT_GB_H 0x00904 @@ -118,24 +111,9 @@ #define MMC_RXWATCHDOGERROR 0x009A0 #define MMC_RXLPIUSECCNTR 0x009A4 #define MMC_RXLPITRANCNTR 0x009A8 -#define MMC_RX_DISCARD_PKTS_GB_L 0x009AC -#define MMC_RX_DISCARD_PKTS_GB_H 0x009B0 -#define MMC_RX_DISCARD_OCTET_GB_L 0x009B4 -#define MMC_RX_DISCARD_OCTET_GB_H 0x009B8 #define MMC_RXALIGNMENTERROR 0x009BC -#define MMC_RX_PER_PRIO_STATUS 0x009D0 -#define MMC_RX_PER_PRIO_PKT_GB 0x009D4 -#define MMC_RX_PER_PRIO_PKT_B 0x009D8 -#define MMC_RX_PER_PRIO_PFC_PKT_GB 0x009DC -#define MMC_RX_PER_PRIO_OCTET_GB_L 0x009E0 -#define MMC_RX_PER_PRIO_OCTET_GB_H 0x009E4 -#define MMC_RX_PER_PRIO_DISCARD_GB 0x009E8 -#define MMC_FPE_TX_INT 0x00A00 -#define MMC_FPE_TX_INT_MASK 0x00A04 #define MMC_TX_FPE_FRAG_COUNTER 0x00A08 #define MMC_TX_HOLD_REQ_COUNTER 0x00A0C -#define MMC_FPE_RX_INT 0x00A20 -#define MMC_FPE_RX_INT_MASK 0x00A24 #define MMC_RX_PKT_ASSEMBLY_ERR_CNTR 0x00A28 #define MMC_RX_PKT_SMD_ERR_CNTR 0x00A2C #define MMC_RX_PKT_ASSEMBLY_OK_CNTR 0x00A30 @@ -147,8 +125,6 @@ #define MMC_TXEXESSCOL 0x00A50 #define MMC_TXCARRIERERROR 0x00A54 #define MMC_TXEXECESS_DEFERRED 0x00A58 -#define MMC_IPC_RX_INT_MASK 0x00A5C -#define MMC_IPC_RX_INT 0x00A60 #define MMC_RXIPV4_GD_PKTS_L 0x00A64 #define MMC_RXIPV4_GD_PKTS_H 0x00A68 #define MMC_RXIPV4_HDRERR_PKTS_L 0x00A6C @@ -220,7 +196,7 @@ * 1) MAC should be init and started. 
see osi_start_mac() * 2) osi_core->osd should be populated */ -void mgbe_read_mmc(struct osi_core_priv_data *osi_core); +void mgbe_read_mmc(struct osi_core_priv_data *const osi_core); /** * @brief mgbe_reset_mmc - To reset MMC registers and ether_mmc_counter @@ -232,5 +208,5 @@ void mgbe_read_mmc(struct osi_core_priv_data *osi_core); * 1) MAC should be init and started. see osi_start_mac() * 2) osi_core->osd should be populated */ -void mgbe_reset_mmc(struct osi_core_priv_data *osi_core); +void mgbe_reset_mmc(struct osi_core_priv_data *const osi_core); #endif diff --git a/kernel/nvethernetrm/osi/core/osi_core.c b/kernel/nvethernetrm/osi/core/osi_core.c index 75c9e721f2..f59884d482 100644 --- a/kernel/nvethernetrm/osi/core/osi_core.c +++ b/kernel/nvethernetrm/osi/core/osi_core.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -25,40 +25,13 @@ #include "core_local.h" #include "../osi/common/common.h" -#ifdef HSI_SUPPORT -/** - * @brief hsi_err_code - Arry of error code and reporter ID to be use by - * each Ethernet controller instance - * a condition is met or a timeout occurs - * Below is the data: - * uncorrectable_error_code, correctable_error_code, reporter ID - * hsi_err_code[0] to hsi_err_code[3] for MGBE instance - * hsi_err_code[4] is for EQOS - */ -nveu32_t hsi_err_code[][3] = { - {0x2A00, 0x2E08, 0x8019}, - {0x2A01, 0x2E09, 0x801A}, - {0x2A02, 0x2E0A, 0x801B}, - {0x2A03, 0x2E0B, 0x801C}, - {0x28AD, 0x2DE6, 0x8009}, -}; -#endif - -/** - * @brief g_core - Static core local data array - */ static struct core_local g_core[MAX_CORE_INSTANCES]; -/** - * @brief if_ops - Static core interface operations for virtual/non-virtual - * case - */ -static struct if_core_ops if_ops[MAX_INTERFACE_OPS]; - /** * 
@brief Function to validate function pointers. * * @param[in] osi_core: OSI Core private data structure. + * @param[in] if_ops_p: pointer to interface core operations. * * @note * API Group: @@ -74,34 +47,39 @@ static nve32_t validate_if_func_ptrs(struct osi_core_priv_data *const osi_core, { nveu32_t i = 0; void *temp_ops = (void *)if_ops_p; + nve32_t ret = 0; #if __SIZEOF_POINTER__ == 8 nveu64_t *l_ops = (nveu64_t *)temp_ops; #elif __SIZEOF_POINTER__ == 4 nveu32_t *l_ops = (nveu32_t *)temp_ops; #else - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Undefined architecture\n", 0ULL); - return -1; + ret = -1; + goto fail; #endif + (void) osi_core; for (i = 0; i < (sizeof(*if_ops_p) / (nveu64_t)__SIZEOF_POINTER__); i++) { if (*l_ops == 0U) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "failed at index : ", i); - return -1; + ret = -1; + goto fail; } l_ops++; } - - return 0; +fail: + return ret; } /** * @brief Function to validate input arguments of API. * * @param[in] osi_core: OSI Core private data structure. + * @param[in] l_core: Core local private data structure. 
* * @note * API Group: @@ -115,17 +93,20 @@ static nve32_t validate_if_func_ptrs(struct osi_core_priv_data *const osi_core, static inline nve32_t validate_if_args(struct osi_core_priv_data *const osi_core, struct core_local *l_core) { + nve32_t ret = 0; + if ((osi_core == OSI_NULL) || (l_core->if_init_done == OSI_DISABLE) || (l_core->magic_num != (nveu64_t)osi_core)) { - return -1; + ret = -1; } - return 0; + return ret; } struct osi_core_priv_data *osi_get_core(void) { nveu32_t i; + struct osi_core_priv_data *osi_core = OSI_NULL; for (i = 0U; i < MAX_CORE_INSTANCES; i++) { if (g_core[i].if_init_done == OSI_ENABLE) { @@ -136,7 +117,7 @@ struct osi_core_priv_data *osi_get_core(void) } if (i == MAX_CORE_INSTANCES) { - return OSI_NULL; + goto fail; } g_core[i].magic_num = (nveu64_t)&g_core[i].osi_core; @@ -145,45 +126,55 @@ struct osi_core_priv_data *osi_get_core(void) g_core[i].tx_ts_head.next = &g_core[i].tx_ts_head; g_core[i].pps_freq = OSI_DISABLE; - return &g_core[i].osi_core; + osi_core = &g_core[i].osi_core; + osi_memset(osi_core, 0, sizeof(struct osi_core_priv_data)); +fail: + return osi_core; } struct osi_core_priv_data *get_role_pointer(nveu32_t role) { nveu32_t i; + struct osi_core_priv_data *ret_ptr = OSI_NULL; if ((role != OSI_PTP_M2M_PRIMARY) && (role != OSI_PTP_M2M_SECONDARY)) { - return OSI_NULL; + goto done; } /* Current approch to give pointer for 1st role */ for (i = 0U; i < MAX_CORE_INSTANCES; i++) { if ((g_core[i].if_init_done == OSI_ENABLE) && (g_core[i].ether_m2m_role == role)) { - return &g_core[i].osi_core; + ret_ptr = &g_core[i].osi_core; + break; } } - return OSI_NULL; +done: + return ret_ptr; } nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; - nve32_t ret = -1; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + static struct if_core_ops if_ops[MAX_INTERFACE_OPS]; + nve32_t ret = 0; if (osi_core == OSI_NULL) { - return -1; + ret = -1; + 
goto fail; } if (osi_core->use_virtualization > OSI_ENABLE) { - return ret; + ret = -1; + goto fail; } if ((l_core->magic_num != (nveu64_t)osi_core) || (l_core->if_init_done == OSI_ENABLE)) { - return -1; + ret = -1; + goto fail; } l_core->if_ops_p = &if_ops[osi_core->use_virtualization]; @@ -195,16 +186,17 @@ nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core) } if (validate_if_func_ptrs(osi_core, l_core->if_ops_p) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Interface function validation failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } ret = l_core->if_ops_p->if_init_core_ops(osi_core); if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "if_init_core_ops failed\n", 0ULL); - return ret; + goto fail; } l_core->ts_lock = OSI_DISABLE; l_core->ether_m2m_role = osi_core->m2m_role; @@ -228,11 +220,11 @@ nve32_t osi_init_core_ops(struct osi_core_priv_data *const osi_core) if (osi_core->pps_frq <= OSI_ENABLE) { l_core->pps_freq = osi_core->pps_frq; } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid pps_frq\n", (nveu64_t)osi_core->pps_frq); ret = -1; } - +fail: return ret; } @@ -240,67 +232,79 @@ nve32_t osi_write_phy_reg(struct osi_core_priv_data *const osi_core, const nveu32_t phyaddr, const nveu32_t phyreg, const nveu16_t phydata) { - struct core_local *l_core = (struct core_local *)osi_core; + nve32_t ret = -1; + struct core_local *l_core = (struct core_local *)(void *)osi_core; if (validate_if_args(osi_core, l_core) < 0) { - return -1; + goto fail; } - return l_core->if_ops_p->if_write_phy_reg(osi_core, phyaddr, phyreg, - phydata); + ret = l_core->if_ops_p->if_write_phy_reg(osi_core, phyaddr, phyreg, + phydata); +fail: + return ret; } nve32_t osi_read_phy_reg(struct osi_core_priv_data *const osi_core, const nveu32_t phyaddr, const nveu32_t phyreg) { - struct 
core_local *l_core = (struct core_local *)osi_core; + nve32_t ret = -1; + struct core_local *l_core = (struct core_local *)(void *)osi_core; if (validate_if_args(osi_core, l_core) < 0) { - return -1; + goto fail; } - return l_core->if_ops_p->if_read_phy_reg(osi_core, phyaddr, phyreg); + ret = l_core->if_ops_p->if_read_phy_reg(osi_core, phyaddr, phyreg); +fail: + return ret; } -nve32_t osi_hw_core_init(struct osi_core_priv_data *const osi_core, - nveu32_t tx_fifo_size, nveu32_t rx_fifo_size) +nve32_t osi_hw_core_init(struct osi_core_priv_data *const osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; + nve32_t ret = -1; + struct core_local *l_core = (struct core_local *)(void *)osi_core; if (validate_if_args(osi_core, l_core) < 0) { - return -1; + goto fail; } - return l_core->if_ops_p->if_core_init(osi_core, tx_fifo_size, - rx_fifo_size); + ret = l_core->if_ops_p->if_core_init(osi_core); +fail: + return ret; } nve32_t osi_hw_core_deinit(struct osi_core_priv_data *const osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; + nve32_t ret = -1; + struct core_local *l_core = (struct core_local *)(void *)osi_core; if (validate_if_args(osi_core, l_core) < 0) { - return -1; + goto fail; } - return l_core->if_ops_p->if_core_deinit(osi_core); + ret = l_core->if_ops_p->if_core_deinit(osi_core); +fail: + return ret; } nve32_t osi_handle_ioctl(struct osi_core_priv_data *osi_core, struct osi_ioctl *data) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; nve32_t ret = -1; if (validate_if_args(osi_core, l_core) < 0) { - return ret; + goto fail; } if (data == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: Invalid argument\n", 0ULL); - return ret; + goto fail; } - return l_core->if_ops_p->if_handle_ioctl(osi_core, data); + ret = l_core->if_ops_p->if_handle_ioctl(osi_core, data); +fail: + return 
ret; } diff --git a/kernel/nvethernetrm/osi/core/osi_hal.c b/kernel/nvethernetrm/osi/core/osi_hal.c index 0407070294..436412796a 100644 --- a/kernel/nvethernetrm/osi/core/osi_hal.c +++ b/kernel/nvethernetrm/osi/core/osi_hal.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,16 +24,19 @@ #include #include "core_local.h" #include "../osi/common/common.h" -#include "vlan_filter.h" +#include "core_common.h" +#include "eqos_core.h" +#include "mgbe_core.h" #include "frp.h" #ifdef OSI_DEBUG #include "debug.h" #endif /* OSI_DEBUG */ - +#ifndef OSI_STRIPPED_LIB +#include "vlan_filter.h" +#endif /** * @brief g_ops - Static core operations array. */ -static struct core_ops g_ops[MAX_MAC_IP_TYPES]; /** * @brief Function to validate input arguments of API. 
@@ -51,15 +54,17 @@ static struct core_ops g_ops[MAX_MAC_IP_TYPES]; * @retval -1 on Failure */ static inline nve32_t validate_args(struct osi_core_priv_data *const osi_core, - struct core_local *l_core) + struct core_local *const l_core) { + nve32_t ret = 0; + if ((osi_core == OSI_NULL) || (osi_core->base == OSI_NULL) || (l_core->init_done == OSI_DISABLE) || (l_core->magic_num != (nveu64_t)osi_core)) { - return -1; + ret = -1; } - return 0; + return ret; } /** @@ -81,79 +86,152 @@ static nve32_t validate_func_ptrs(struct osi_core_priv_data *const osi_core, struct core_ops *ops_p) { nveu32_t i = 0; + nve32_t ret = 0; void *temp_ops = (void *)ops_p; #if __SIZEOF_POINTER__ == 8 nveu64_t *l_ops = (nveu64_t *)temp_ops; #elif __SIZEOF_POINTER__ == 4 nveu32_t *l_ops = (nveu32_t *)temp_ops; #else - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Undefined architecture\n", 0ULL); - return -1; + ret = -1; + goto fail; #endif + (void) osi_core; for (i = 0; i < (sizeof(*ops_p) / (nveu64_t)__SIZEOF_POINTER__); i++) { if (*l_ops == 0U) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "core: fn ptr validation failed at\n", (nveu64_t)i); - return -1; + ret = -1; + goto fail; } l_ops++; } - - return 0; +fail: + return ret; } -nve32_t osi_hal_write_phy_reg(struct osi_core_priv_data *const osi_core, - const nveu32_t phyaddr, const nveu32_t phyreg, - const nveu16_t phydata) +/** + * @brief osi_hal_write_phy_reg - HW API to Write to a PHY register through MAC + * over MDIO bus. + * + * @note + * Algorithm: + * - Before proceeding for reading for PHY register check whether any MII + * operation going on MDIO bus by polling MAC_GMII_BUSY bit. + * - Program data into MAC MDIO data register. + * - Populate required parameters like phy address, phy register etc,, + * in MAC MDIO Address register. write and GMII busy bits needs to be set + * in this operation. 
+ * - Write into MAC MDIO address register poll for GMII busy for MDIO + * operation to complete. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] phyaddr: PHY address (PHY ID) associated with PHY + * @param[in] phyreg: Register which needs to be write to PHY. + * @param[in] phydata: Data to write to a PHY register. + * + * @pre MAC should be init and started. see osi_start_mac() + * + * @note + * Traceability Details: + * - SWUD_ID: TODO + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: Yes + * - Run time: Yes + * - De-initialization: No + * + * @retval 0 on success + * @retval -1 on failure. + */ +static nve32_t osi_hal_write_phy_reg(struct osi_core_priv_data *const osi_core, + const nveu32_t phyaddr, const nveu32_t phyreg, + const nveu16_t phydata) { - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } + struct core_local *l_core = (struct core_local *)(void *)osi_core; return l_core->ops_p->write_phy_reg(osi_core, phyaddr, phyreg, phydata); } -nve32_t osi_hal_read_phy_reg(struct osi_core_priv_data *const osi_core, - const nveu32_t phyaddr, const nveu32_t phyreg) -{ - struct core_local *l_core = (struct core_local *)osi_core; +/** + * @brief osi_hal_read_phy_reg - HW API to Read from a PHY register through MAC + * over MDIO bus. + * + * @note + * Algorithm: + * - Before proceeding for reading for PHY register check whether any MII + * operation going on MDIO bus by polling MAC_GMII_BUSY bit. + * - Populate required parameters like phy address, phy register etc,, + * in program it in MAC MDIO Address register. Read and GMII busy bits + * needs to be set in this operation. + * - Write into MAC MDIO address register poll for GMII busy for MDIO + * operation to complete. 
After this data will be available at MAC MDIO + * data register. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] phyaddr: PHY address (PHY ID) associated with PHY + * @param[in] phyreg: Register which needs to be read from PHY. + * + * @pre MAC should be init and started. see osi_start_mac() + * + * @note + * Traceability Details: + * - SWUD_ID: TODO + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: Yes + * - Run time: Yes + * - De-initialization: No + * + * @retval data from PHY register on success + * @retval -1 on failure + */ +static nve32_t osi_hal_read_phy_reg(struct osi_core_priv_data *const osi_core, + const nveu32_t phyaddr, const nveu32_t phyreg) - if (validate_args(osi_core, l_core) < 0) { - return -1; - } +{ + struct core_local *l_core = (struct core_local *)(void *)osi_core; return l_core->ops_p->read_phy_reg(osi_core, phyaddr, phyreg); } static nve32_t osi_hal_init_core_ops(struct osi_core_priv_data *const osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; - typedef void (*init_ops_arr)(struct core_ops *); - typedef void *(*safety_init)(void); - - init_ops_arr i_ops[MAX_MAC_IP_TYPES][MAX_MAC_IP_TYPES] = { + nve32_t ret = -1; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + typedef void (*init_core_ops_arr)(struct core_ops *local_ops); + static struct core_ops g_ops[MAX_MAC_IP_TYPES]; + init_core_ops_arr i_ops[MAX_MAC_IP_TYPES][MAX_MAC_IP_TYPES] = { { eqos_init_core_ops, OSI_NULL }, { mgbe_init_core_ops, OSI_NULL } }; - safety_init s_init[MAX_MAC_IP_TYPES][MAX_MAC_IP_TYPES] = { - { eqos_get_core_safety_config, ivc_get_core_safety_config }, - { OSI_NULL, OSI_NULL } - }; - if (osi_core == OSI_NULL) { - return -1; + goto exit; } if ((l_core->magic_num != (nveu64_t)osi_core) || (l_core->init_done == OSI_ENABLE)) { 
- return -1; + goto exit; } if ((osi_core->osd_ops.ops_log == OSI_NULL) || @@ -163,54 +241,40 @@ static nve32_t osi_hal_init_core_ops(struct osi_core_priv_data *const osi_core) (osi_core->osd_ops.printf == OSI_NULL) || #endif /* OSI_DEBUG */ (osi_core->osd_ops.usleep_range == OSI_NULL)) { - return -1; + goto exit; } if (osi_core->mac > OSI_MAC_HW_MGBE) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid MAC HW type\n", 0ULL); - return -1; + goto exit; } if (osi_core->use_virtualization > OSI_ENABLE) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid use_virtualization value\n", 0ULL); - return -1; + goto exit; } if (i_ops[osi_core->mac][osi_core->use_virtualization] != OSI_NULL) { i_ops[osi_core->mac][osi_core->use_virtualization](&g_ops[osi_core->mac]); } - if (s_init[osi_core->mac][osi_core->use_virtualization] != OSI_NULL) { - osi_core->safety_config = - s_init[osi_core->mac][osi_core->use_virtualization](); - } - if (validate_func_ptrs(osi_core, &g_ops[osi_core->mac]) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "core: function ptrs validation failed\n", 0ULL); - return -1; + goto exit; } l_core->ops_p = &g_ops[osi_core->mac]; l_core->init_done = OSI_ENABLE; - return 0; -} - -nve32_t osi_poll_for_mac_reset_complete( - struct osi_core_priv_data *const osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - - return l_core->ops_p->poll_for_swr(osi_core); + ret = 0; +exit: + return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief init_vlan_filters - Helper function to init all VLAN SW information. 
* @@ -220,7 +284,7 @@ nve32_t osi_poll_for_mac_reset_complete( */ static inline void init_vlan_filters(struct osi_core_priv_data *const osi_core) { - unsigned int i = 0U; + nveu32_t i = 0U; for (i = 0; i < VLAN_NUM_VID; i++) { osi_core->vid[i] = VLAN_ID_INVALID; @@ -229,176 +293,341 @@ static inline void init_vlan_filters(struct osi_core_priv_data *const osi_core) osi_core->vf_bitmap = 0U; osi_core->vlan_filter_cnt = 0U; } +#endif -nve32_t osi_hal_hw_core_init(struct osi_core_priv_data *const osi_core, - nveu32_t tx_fifo_size, nveu32_t rx_fifo_size) +/** + * @brief osi_hal_hw_core_deinit - HW API for MAC deinitialization. + * + * @note + * Algorithm: + * - Stops MAC transmission and reception. + * + * @param[in] osi_core: OSI core private data structure. + * + * @pre MAC has to be out of reset. + * + * @note + * Traceability Details: + * - SWUD_ID: TODO + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: No + * - Run time: No + * - De-initialization: Yes + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +static nve32_t osi_hal_hw_core_deinit(struct osi_core_priv_data *const osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; - nve32_t ret; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - - init_vlan_filters(osi_core); + struct core_local *l_core = (struct core_local *)(void *)osi_core; - /* Init FRP */ - init_frp(osi_core); + /* Stop the MAC */ + hw_stop_mac(osi_core); - ret = l_core->ops_p->core_init(osi_core, tx_fifo_size, rx_fifo_size); + /* Disable MAC interrupts */ + osi_writela(osi_core, 0U, ((nveu8_t *)osi_core->base + HW_MAC_IER)); - if (ret == 0) { - l_core->hw_init_successful = OSI_ENABLE; + if (l_core->l_mac_ver != MAC_CORE_VER_TYPE_EQOS) { + osi_writela(osi_core, 0U, + ((nveu8_t *)osi_core->base + WRAP_COMMON_INTR_ENABLE)); } - return ret; -} + /* Handle the common interrupt if any status bits set */ + l_core->ops_p->handle_common_intr(osi_core); -nve32_t osi_hal_hw_core_deinit(struct osi_core_priv_data *const osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; + l_core->hw_init_successful = OSI_DISABLE; - if (validate_args(osi_core, l_core) < 0) { - return -1; + if (l_core->state != OSI_SUSPENDED) { + /* Reset restore operation flags on interface down */ + l_core->cfg.flags = OSI_DISABLE; } - l_core->hw_init_successful = OSI_DISABLE; - l_core->ops_p->core_deinit(osi_core); - - /* FIXME: Should be fixed */ - //l_core->init_done = OSI_DISABLE; - //l_core->magic_num = 0; + l_core->state = OSI_DISABLE; return 0; } -nve32_t osi_start_mac(struct osi_core_priv_data *const osi_core) +/** + * @brief div_u64 - Calls a function which returns quotient + * + * @param[in] dividend: Dividend + * @param[in] divisor: Divisor + * + * @pre MAC IP should be out of reset and need to be initialized as the + * requirements. 
+ * + * + * @note + * API Group: + * - Initialization: No + * - Run time: Yes + * - De-initialization: No + * @returns Quotient + */ +static inline nveu64_t div_u64(nveu64_t dividend, + nveu64_t divisor) { - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - - l_core->ops_p->start_mac(osi_core); + nveu64_t remain; - return 0; + return div_u64_rem(dividend, divisor, &remain); } -nve32_t osi_stop_mac(struct osi_core_priv_data *const osi_core) +/** + * @brief osi_ptp_configuration - Configure PTP + * + * @note + * Algorithm: + * - Configure the PTP registers that are required for PTP. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] enable: Enable or disable Time Stamping. 0: Disable 1: Enable + * + * @pre + * - MAC should be init and started. see osi_start_mac() + * - osi->ptp_config.ptp_filter need to be filled accordingly to the + * filter that need to be set for PTP packets. Please check osi_ptp_config + * structure declaration on the bit fields that need to be filled. + * - osi->ptp_config.ptp_clock need to be filled with the ptp system clk. + * Currently it is set to 62500000Hz. + * - osi->ptp_config.ptp_ref_clk_rate need to be filled with the ptp + * reference clock that platform supports. + * - osi->ptp_config.sec need to be filled with current time of seconds + * - osi->ptp_config.nsec need to be filled with current time of nseconds + * - osi->base need to be filled with the ioremapped base address + * + * @note + * Traceability Details: + * - SWUD_ID: ETHERNET_NVETHERNETRM_021 + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: Yes + * - Run time: Yes + * - De-initialization: No + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +static nve32_t osi_ptp_configuration(struct osi_core_priv_data *const osi_core, + OSI_UNUSED const nveu32_t enable) { - struct core_local *l_core = (struct core_local *)osi_core; +#ifndef OSI_STRIPPED_LIB + struct core_local *l_core = (struct core_local *)(void *)osi_core; +#endif /* !OSI_STRIPPED_LIB */ + nve32_t ret = 0; + nveu64_t temp = 0, temp1 = 0, temp2 = 0; + nveu64_t ssinc = 0; - if (validate_args(osi_core, l_core) < 0) { - return -1; - } +#ifndef OSI_STRIPPED_LIB + if (enable == OSI_DISABLE) { + /* disable hw time stamping */ + /* Program MAC_Timestamp_Control Register */ + hw_config_tscr(osi_core, OSI_DISABLE); + /* Disable PTP RX Queue routing */ + ret = l_core->ops_p->config_ptp_rxq(osi_core, + osi_core->ptp_config.ptp_rx_queue, + OSI_DISABLE); + } else { +#endif /* !OSI_STRIPPED_LIB */ + /* Program MAC_Timestamp_Control Register */ + hw_config_tscr(osi_core, osi_core->ptp_config.ptp_filter); - l_core->ops_p->stop_mac(osi_core); + /* Program Sub Second Increment Register */ + hw_config_ssir(osi_core); - return 0; -} + /* formula for calculating addend value is + * TSAR = (2^32 * 1000) / (ptp_ref_clk_rate in MHz * SSINC) + * 2^x * y == (y << x), hence + * 2^32 * 1000 == (1000 << 32) + * so addend = (2^32 * 1000)/(ptp_ref_clk_rate in MHZ * SSINC); + */ + ssinc = OSI_PTP_SSINC_4; + if (osi_core->mac_ver == OSI_EQOS_MAC_5_30) { + ssinc = OSI_PTP_SSINC_6; + } -nve32_t osi_common_isr(struct osi_core_priv_data *const osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; + temp = ((nveu64_t)1000 << 32); + temp = (nveu64_t)temp * 1000000U; - if (validate_args(osi_core, l_core) < 0) { - return -1; - } + temp1 = div_u64(temp, + (nveu64_t)osi_core->ptp_config.ptp_ref_clk_rate); - l_core->ops_p->handle_common_intr(osi_core); + temp2 = div_u64(temp1, (nveu64_t)ssinc); - return 0; -} + if (temp2 < UINT_MAX) { + osi_core->default_addend = (nveu32_t)temp2; + } else { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "core: temp2 >= 
UINT_MAX\n", 0ULL); + ret = -1; + goto fail; + } -nve32_t osi_set_mode(struct osi_core_priv_data *const osi_core, - const nve32_t mode) -{ - struct core_local *l_core = (struct core_local *)osi_core; + /* Program addend value */ + ret = hw_config_addend(osi_core, osi_core->default_addend); - if (validate_args(osi_core, l_core) < 0) { - return -1; + /* Set current time */ + if (ret == 0) { + ret = hw_set_systime_to_mac(osi_core, + osi_core->ptp_config.sec, + osi_core->ptp_config.nsec); +#ifndef OSI_STRIPPED_LIB + if (ret == 0) { + /* Enable PTP RX Queue routing */ + ret = l_core->ops_p->config_ptp_rxq(osi_core, + osi_core->ptp_config.ptp_rx_queue, + OSI_ENABLE); + } +#endif /* !OSI_STRIPPED_LIB */ + } +#ifndef OSI_STRIPPED_LIB } - - return l_core->ops_p->set_mode(osi_core, mode); +#endif /* !OSI_STRIPPED_LIB */ +fail: + return ret; } -nve32_t osi_set_speed(struct osi_core_priv_data *const osi_core, - const nve32_t speed) +static nve32_t osi_get_mac_version(struct osi_core_priv_data *const osi_core, nveu32_t *mac_ver) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + nve32_t ret = 0; - if (validate_args(osi_core, l_core) < 0) { - return -1; + *mac_ver = osi_readla(osi_core, ((nveu8_t *)osi_core->base + (nve32_t)MAC_VERSION)) & + MAC_VERSION_SNVER_MASK; + + if (validate_mac_ver_update_chans(*mac_ver, &l_core->num_max_chans, + &l_core->l_mac_ver) == 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid MAC version\n", (nveu64_t)*mac_ver) + ret = -1; } - return l_core->ops_p->set_speed(osi_core, speed); + return ret; } -nve32_t osi_pad_calibrate(struct osi_core_priv_data *const osi_core) +static nve32_t osi_hal_hw_core_init(struct osi_core_priv_data *const osi_core) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + const nveu32_t ptp_ref_clk_rate[3] = {EQOS_X_PTP_CLK_SPEED, 
EQOS_PTP_CLK_SPEED, + MGBE_PTP_CLK_SPEED}; + nve32_t ret; - if (validate_args(osi_core, l_core) < 0) { - return -1; + ret = osi_get_mac_version(osi_core, &osi_core->mac_ver); + if (ret < 0) { + goto fail; } - return l_core->ops_p->pad_calibrate(osi_core); -} + /* Bring MAC out of reset */ + ret = hw_poll_for_swr(osi_core); + if (ret < 0) { + goto fail; + } -nve32_t osi_config_fw_err_pkts(struct osi_core_priv_data *const osi_core, - const nveu32_t qinx, const nveu32_t fw_err) -{ - struct core_local *l_core = (struct core_local *)osi_core; +#ifndef OSI_STRIPPED_LIB + init_vlan_filters(osi_core); - if (validate_args(osi_core, l_core) < 0) { - return -1; +#endif /* !OSI_STRIPPED_LIB */ + + ret = l_core->ops_p->core_init(osi_core); + if (ret < 0) { + goto fail; + } + + /* By default set MAC to Full duplex mode. + * Since this is a local function it will always return sucess, + * so no need to check for return value + */ + (void)hw_set_mode(osi_core, OSI_FULL_DUPLEX); + + /* By default enable rxcsum */ + ret = hw_config_rxcsum_offload(osi_core, OSI_ENABLE); + if (ret == 0) { + l_core->cfg.rxcsum = OSI_ENABLE; + l_core->cfg.flags |= DYNAMIC_CFG_RXCSUM; + } + + /* Set default PTP settings */ + osi_core->ptp_config.ptp_rx_queue = 3U; + osi_core->ptp_config.ptp_ref_clk_rate = ptp_ref_clk_rate[l_core->l_mac_ver]; + osi_core->ptp_config.ptp_filter = OSI_MAC_TCR_TSENA | OSI_MAC_TCR_TSCFUPDT | + OSI_MAC_TCR_TSCTRLSSR | OSI_MAC_TCR_TSVER2ENA | + OSI_MAC_TCR_TSIPENA | OSI_MAC_TCR_TSIPV6ENA | + OSI_MAC_TCR_TSIPV4ENA | OSI_MAC_TCR_SNAPTYPSEL_1; + osi_core->ptp_config.sec = 0; + osi_core->ptp_config.nsec = 0; + osi_core->ptp_config.one_nsec_accuracy = OSI_ENABLE; + ret = osi_ptp_configuration(osi_core, OSI_ENABLE); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Fail to configure PTP\n", 0ULL); + goto fail; } - /* Configure Forwarding of Error packets */ - return l_core->ops_p->config_fw_err_pkts(osi_core, qinx, fw_err); + /* Start the MAC */ + 
hw_start_mac(osi_core); + + l_core->lane_status = OSI_ENABLE; + l_core->hw_init_successful = OSI_ENABLE; + +fail: + return ret; } +#ifndef OSI_STRIPPED_LIB static nve32_t conf_ptp_offload(struct osi_core_priv_data *const osi_core, struct osi_pto_config *const pto_config) { - struct core_local *l_core = (struct core_local *)osi_core; - int ret = -1; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + nve32_t ret = -1; /* Validate input arguments */ if (pto_config == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "pto_config is NULL\n", 0ULL); return ret; } - if (pto_config->mc_uc != OSI_ENABLE && - pto_config->mc_uc != OSI_DISABLE) { + if ((pto_config->mc_uc != OSI_ENABLE) && + (pto_config->mc_uc != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid mc_uc flag value\n", (nveul64_t)pto_config->mc_uc); return ret; } - if (pto_config->en_dis != OSI_ENABLE && - pto_config->en_dis != OSI_DISABLE) { + if ((pto_config->en_dis != OSI_ENABLE) && + (pto_config->en_dis != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid enable flag value\n", (nveul64_t)pto_config->en_dis); return ret; } - if (pto_config->snap_type != OSI_PTP_SNAP_ORDINARY && - pto_config->snap_type != OSI_PTP_SNAP_TRANSPORT && - pto_config->snap_type != OSI_PTP_SNAP_P2P) { + if ((pto_config->snap_type != OSI_PTP_SNAP_ORDINARY) && + (pto_config->snap_type != OSI_PTP_SNAP_TRANSPORT) && + (pto_config->snap_type != OSI_PTP_SNAP_P2P)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid SNAP type value\n", (nveul64_t)pto_config->snap_type); return ret; } - if (pto_config->master != OSI_ENABLE && - pto_config->master != OSI_DISABLE) { + if ((pto_config->master != OSI_ENABLE) && + (pto_config->master != OSI_DISABLE)) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "invalid master flag value\n", (nveul64_t)pto_config->master); @@ -438,29 +667,54 @@ static nve32_t 
conf_ptp_offload(struct osi_core_priv_data *const osi_core, return ret; } +#endif /* !OSI_STRIPPED_LIB */ -nve32_t osi_l2_filter(struct osi_core_priv_data *const osi_core, - const struct osi_filter *filter) -{ - struct core_local *l_core = (struct core_local *)osi_core; - nve32_t ret; - - if ((validate_args(osi_core, l_core) < 0) || (filter == OSI_NULL)) { - return -1; - } - - if (filter == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "CORE: filter is NULL\n", 0ULL); - return -1; - } - - ret = l_core->ops_p->config_mac_pkt_filter_reg(osi_core, filter); - if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "failed to configure MAC packet filter register\n", - 0ULL); - return ret; +/** + * @brief osi_l2_filter - configure L2 mac filter. + * + * @note + * Algorithm: + * - This sequence is used to configure MAC in different packet + * processing modes like promiscuous, multicast, unicast, + * hash unicast/multicast and perfect/inverse matching for L2 DA + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] filter: OSI filter structure. + * + * @pre + * - MAC should be initialized and started. see osi_start_mac() + * + * @note + * Traceability Details: + * - SWUD_ID: ETHERNET_NVETHERNETRM_018 + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: Yes + * - Run time: Yes + * - De-initialization: No + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +static nve32_t osi_l2_filter(struct osi_core_priv_data *const osi_core, + const struct osi_filter *filter) +{ + struct core_local *l_core = (struct core_local *)(void *)osi_core; + nve32_t ret = 0; + + ret = hw_config_mac_pkt_filter_reg(osi_core, filter); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "failed to configure MAC packet filter register\n", + 0ULL); + goto fail; } if (((filter->oper_mode & OSI_OPER_ADDR_UPDATE) != OSI_NONE) || @@ -472,224 +726,437 @@ nve32_t osi_l2_filter(struct osi_core_priv_data *const osi_core, OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "DCS requested. Conflicts with DT config\n", 0ULL); - return ret; + goto fail; } ret = l_core->ops_p->update_mac_addr_low_high_reg(osi_core, filter); } +fail: return ret; } /** - * @brief helper_l4_filter helper function for l4 filtering + * @brief l3l4_find_match - function to find filter match * - * @param[in] osi_core: OSI Core private data structure. - * @param[in] l_filter: filter structure - * @param[in] type: filter type l3 or l4 - * @param[in] dma_routing_enable: dma routing enable (1) or disable (0) - * @param[in] dma_chan: dma channel + * @note + * Algorithm: + * - Search through filter list l_core->cfg.l3_l4[] and find for a + * match with l3_l4 input data. + * - Filter data matches, store the filter index into filter_no. + * - Store first found filter index into free_filter_no. + * - Return 0 on match. + * - Return -1 on failure. * - * @pre MAC needs to be out of reset and proper clock configured. + * @param[in] l_core: OSI local core data structure. + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * @param[out] filter_no: pointer to filter index + * @param[out] free_filter_no: pointer to free filter index + * @param[in] max_filter_no: maximum allowed filter number * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No + * @pre + * - MAC should be initialized and started. 
see osi_start_mac() * - * @retval 0 on Success - * @retval -1 on Failure + * @retval 0 on success + * @retval -1 on failure. */ -static inline nve32_t helper_l4_filter( - struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p, - struct osi_l3_l4_filter l_filter, - nveu32_t type, - nveu32_t dma_routing_enable, - nveu32_t dma_chan) +static nve32_t l3l4_find_match(const struct core_local *const l_core, + const struct osi_l3_l4_filter *const l3_l4, + nveu32_t *filter_no, + nveu32_t *free_filter_no, + nveu32_t max_filter_no) { - nve32_t ret = 0; + nveu32_t i; + nve32_t ret = -1; + nveu32_t found_free_index = 0; + nve32_t filter_size = (nve32_t)sizeof(l3_l4->data); +#if defined(L3L4_WILDCARD_FILTER) + nveu32_t start_idx = 1; /* leave first one for TCP wildcard */ +#else + nveu32_t start_idx = 0; +#endif /* L3L4_WILDCARD_FILTER */ + + /* init free index value to invalid value */ + *free_filter_no = UINT_MAX; + + for (i = start_idx; i <= max_filter_no; i++) { + if (l_core->cfg.l3_l4[i].filter_enb_dis == OSI_FALSE) { + /* filter not enabled, save free index */ + if (found_free_index == 0U) { + *free_filter_no = i; + found_free_index = 1; + } + continue; + } - ret = ops_p->config_l4_filters(osi_core, - l_filter.filter_no, - l_filter.filter_enb_dis, - type, - l_filter.src_dst_addr_match, - l_filter.perfect_inverse_match, - dma_routing_enable, - dma_chan); - if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "failed to configure L4 filters\n", 0ULL); - return ret; + if (osi_memcmp(&(l_core->cfg.l3_l4[i].data), &(l3_l4->data), + filter_size) != 0) { + /* data do not match */ + continue; + } + + /* found a match */ + ret = 0; + *filter_no = i; + break; } - return ops_p->update_l4_port_no(osi_core, - l_filter.filter_no, - l_filter.port_no, - l_filter.src_dst_addr_match); + return ret; } /** - * @brief helper_l3_filter helper function for l3 filtering + * @brief configure_l3l4_filter_valid_params - parameter validation function for l3l4 configuration * - * 
@param[in] osi_core: OSI Core private data structure. - * @param[in] l_filter: filter structure - * @param[in] type: filter type l3 or l4 - * @param[in] dma_routing_enable: dma routing enable (1) or disable (0) - * @param[in] dma_chan: dma channel + * @note + * Algorithm: + * - Validate all the l3_l4 structure parameter. + * - Verify routing dma channel id value. + * - Vefify each enable/disable parameters is <= OSI_TRUE. + * - Return -1 if parameter validation fails. + * - Return 0 on success. * - * @pre MAC needs to be out of reset and proper clock configured. + * @param[in] osi_core: OSI core private data structure. + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No + * @pre + * - MAC should be initialized and started. see osi_start_mac() * - * @retval 0 on Success - * @retval -1 on Failure + * @retval 0 on success + * @retval -1 on failure. */ -static inline nve32_t helper_l3_filter( - struct osi_core_priv_data *const osi_core, - struct core_ops *ops_p, - struct osi_l3_l4_filter l_filter, - nveu32_t type, - nveu32_t dma_routing_enable, - nveu32_t dma_chan) +static nve32_t configure_l3l4_filter_valid_params(const struct osi_core_priv_data *const osi_core, + const struct osi_l3_l4_filter *const l3_l4) { - nve32_t ret = 0; + const nveu32_t max_dma_chan[2] = { + OSI_EQOS_MAX_NUM_CHANS, + OSI_MGBE_MAX_NUM_CHANS + }; + nve32_t ret = -1; - ret = ops_p->config_l3_filters(osi_core, - l_filter.filter_no, - l_filter.filter_enb_dis, - type, - l_filter.src_dst_addr_match, - l_filter.perfect_inverse_match, - dma_routing_enable, - dma_chan); - if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "failed to configure L3 filters\n", 0ULL); - return ret; + /* validate dma channel */ + if (l3_l4->dma_chan > max_dma_chan[osi_core->mac]) { + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_OUTOFBOUND), + ("L3L4: Wrong DMA channel: "), (l3_l4->dma_chan)); 
+ goto exit_func; } - if (type == OSI_IP6_FILTER) { - ret = ops_p->update_ip6_addr(osi_core, l_filter.filter_no, - l_filter.ip6_addr); - } else if (type == OSI_IP4_FILTER) { - ret = ops_p->update_ip4_addr(osi_core, l_filter.filter_no, - l_filter.ip4_addr, - l_filter.src_dst_addr_match); - } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "Invalid L3 filter type\n", 0ULL); - return -1; + /* valate enb parameters */ + if ((l3_l4->filter_enb_dis +#ifndef OSI_STRIPPED_LIB + | l3_l4->dma_routing_enable | + l3_l4->data.is_udp | + l3_l4->data.is_ipv6 | + l3_l4->data.src.port_match | + l3_l4->data.src.addr_match | + l3_l4->data.dst.port_match | + l3_l4->data.dst.addr_match | + l3_l4->data.src.port_match_inv | + l3_l4->data.src.addr_match_inv | + l3_l4->data.dst.port_match_inv | + l3_l4->data.dst.addr_match_inv +#endif /* !OSI_STRIPPED_LIB */ + ) > OSI_TRUE) { + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_OUTOFBOUND), + ("L3L4: one of the enb param > OSI_TRUE: "), 0); + goto exit_func; } +#ifndef OSI_STRIPPED_LIB + /* validate port/addr enb bits */ + if (l3_l4->filter_enb_dis == OSI_TRUE) { + if ((l3_l4->data.src.port_match | l3_l4->data.src.addr_match | + l3_l4->data.dst.port_match | l3_l4->data.dst.addr_match) + == OSI_FALSE) { + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_OUTOFBOUND), + ("L3L4: None of the enb bits are not set: "), 0); + goto exit_func; + } + if ((l3_l4->data.is_ipv6 & l3_l4->data.src.addr_match & + l3_l4->data.dst.addr_match) != OSI_FALSE) { + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_OUTOFBOUND), + ("L3L4: Both ip6 addr match bits are set\n"), 0); + goto exit_func; + } + } +#endif /* !OSI_STRIPPED_LIB */ + + /* success */ + ret = 0; + +exit_func: + return ret; } -nve32_t osi_l3l4_filter(struct osi_core_priv_data *const osi_core, - const struct osi_l3_l4_filter l_filter, - const nveu32_t type, const nveu32_t dma_routing_enable, - const nveu32_t dma_chan, const nveu32_t is_l4_filter) +/** + * @brief configure_l3l4_filter_helper - helper function 
for l3l4 configuration + * + * @note + * Algorithm: + * - Confifure l3l4 filter using l_core->ops_p->config_l3l4_filters(). + * Return -1 if config_l3l4_filters() fails. + * - Store the filter into l_core->cfg.l3_l4[] and enable + * l3l4 filter if any of the filter index enabled currently. + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] filter_no: pointer to filter number + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * + * @pre + * - MAC should be initialized and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. + */ +static nve32_t configure_l3l4_filter_helper(struct osi_core_priv_data *const osi_core, + nveu32_t filter_no, + const struct osi_l3_l4_filter *const l3_l4) { - struct core_local *l_core = (struct core_local *)osi_core; - nve32_t ret = -1; + struct osi_l3_l4_filter *cfg_l3_l4; + struct core_local *const l_core = (struct core_local *)(void *)osi_core; + nve32_t ret; - if (validate_args(osi_core, l_core) < 0) { - return -1; + ret = l_core->ops_p->config_l3l4_filters(osi_core, filter_no, l3_l4); + if (ret < 0) { + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_HW_FAIL), + ("Failed to config L3L4 filters: "), (filter_no)); + goto exit_func; } - if ((dma_routing_enable == OSI_ENABLE) && - (osi_core->dcs_en != OSI_ENABLE)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, - "dma routing enabled but dcs disabled in DT\n", - 0ULL); - return ret; - } + cfg_l3_l4 = &(l_core->cfg.l3_l4[filter_no]); + if (l3_l4->filter_enb_dis == OSI_TRUE) { + /* Store the filter. + * osi_memcpy is an internal function and it cannot fail, hence + * ignoring return value. 
+ */ + (void)osi_memcpy(cfg_l3_l4, l3_l4, sizeof(struct osi_l3_l4_filter)); + OSI_CORE_INFO((osi_core->osd), (OSI_LOG_ARG_OUTOFBOUND), + ("L3L4: ADD: "), (filter_no)); - if (is_l4_filter == OSI_ENABLE) { - ret = helper_l4_filter(osi_core, l_core->ops_p, l_filter, type, - dma_routing_enable, dma_chan); + /* update filter mask bit */ + osi_core->l3l4_filter_bitmask |= ((nveu32_t)1U << (filter_no & 0x1FU)); } else { - ret = helper_l3_filter(osi_core, l_core->ops_p, l_filter, type, - dma_routing_enable, dma_chan); - } + /* Clear the filter data. + * osi_memset is an internal function and it cannot fail, hence + * ignoring return value. + */ + (void)osi_memset(cfg_l3_l4, 0, sizeof(struct osi_l3_l4_filter)); + OSI_CORE_INFO((osi_core->osd), (OSI_LOG_ARG_OUTOFBOUND), + ("L3L4: DELETE: "), (filter_no)); - if (ret < 0) { - OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, - "L3/L4 helper function failed\n", 0ULL); - return ret; + /* update filter mask bit */ + osi_core->l3l4_filter_bitmask &= ~((nveu32_t)1U << (filter_no & 0x1FU)); } - if (osi_core->l3l4_filter_bitmask != OSI_DISABLE) { - ret = l_core->ops_p->config_l3_l4_filter_enable(osi_core, - OSI_ENABLE); + if (osi_core->l3l4_filter_bitmask != 0U) { + /* enable l3l4 filter */ + ret = hw_config_l3_l4_filter_enable(osi_core, OSI_ENABLE); } else { - ret = l_core->ops_p->config_l3_l4_filter_enable(osi_core, - OSI_DISABLE); + /* disable l3l4 filter */ + ret = hw_config_l3_l4_filter_enable(osi_core, OSI_DISABLE); } +exit_func: + return ret; } -nve32_t osi_config_rxcsum_offload(struct osi_core_priv_data *const osi_core, - const nveu32_t enable) +#if defined(L3L4_WILDCARD_FILTER) +/** + * @brief l3l4_add_wildcard_filter - function to configure wildcard filter. + * + * @note + * Algorithm: + * - Configure TCP wildcard filter at index 0 using configure_l3l4_filter_helper(). + * + * @param[in] osi_core: OSI Core private data structure. 
+ * @param[in] max_filter_no: maximum allowed filter number + * + * @pre + * - MAC should be initialized and started. see osi_start_mac() + */ +static void l3l4_add_wildcard_filter(struct osi_core_priv_data *const osi_core, + nveu32_t max_filter_no) { - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; + nve32_t err = -1; + struct osi_l3_l4_filter *l3l4_filter; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + + /* use max filter index to confiture wildcard filter */ + if (l_core->l3l4_wildcard_filter_configured != OSI_ENABLE) { + /* Configure TCP wildcard filter at index 0. + * INV IP4 filter with SA (0) + DA (0) with UDP perfect match with + * SP (0) + DP (0) with no routing enabled. + * - TCP packets will have a IP filter match and will be routed to default DMA. + * - UDP packets will have a IP match but no L4 match, hence HW goes for + * next filter index for finding match. + */ + l3l4_filter = &(l_core->cfg.l3_l4[0]); + osi_memset(l3l4_filter, 0, sizeof(struct osi_l3_l4_filter)); + l3l4_filter->filter_enb_dis = OSI_TRUE; + l3l4_filter->data.is_udp = OSI_TRUE; + l3l4_filter->data.src.addr_match = OSI_TRUE; + l3l4_filter->data.src.addr_match_inv = OSI_TRUE; + l3l4_filter->data.src.port_match = OSI_TRUE; + l3l4_filter->data.dst.addr_match = OSI_TRUE; + l3l4_filter->data.dst.addr_match_inv = OSI_TRUE; + l3l4_filter->data.dst.port_match = OSI_TRUE; + + /* configure wildcard at last filter index */ + err = configure_l3l4_filter_helper(osi_core, 0, l3l4_filter); + if (err < 0) { + /* wildcard config failed */ + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_INVALID), + ("L3L4: TCP wildcard config failed: "), (0UL)); + } } - return l_core->ops_p->config_rxcsum_offload(osi_core, enable); + if (err >= 0) { + /* wildcard config success */ + l_core->l3l4_wildcard_filter_configured = OSI_ENABLE; + OSI_CORE_INFO((osi_core->osd), (OSI_LOG_ARG_INVALID), + ("L3L4: Wildcard config success"), 
(0UL)); + } } +#endif /* L3L4_WILDCARD_FILTER */ -nve32_t osi_set_systime_to_mac(struct osi_core_priv_data *const osi_core, - const nveu32_t sec, const nveu32_t nsec) +/** + * @brief configure_l3l4_filter - function to configure l3l4 filter. + * + * @note + * Algorithm: + * - Validate all the l3_l4 structure parameter using configure_l3l4_filter_valid_params(). + * Return -1 if parameter validation fails. + * - For filter enable case, + * -> If filter already enabled, return -1 to report error. + * -> Otherwise find free index and configure filter using configure_l3l4_filter_helper(). + * - For filter disable case, + * -> If filter match not found, return 0 to report caller that filter already removed. + * -> Otherwise disable filter using configure_l3l4_filter_helper(). + * - Return -1 if configure_l3l4_filter_helper() fails. + * - Return 0 on success. + * + * @param[in] osi_core: OSI Core private data structure. + * @param[in] l3_l4: Pointer to l3 l4 filter structure (#osi_l3_l4_filter) + * + * @pre + * - MAC should be initialized and started. see osi_start_mac() + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +static nve32_t configure_l3l4_filter(struct osi_core_priv_data *const osi_core, + const struct osi_l3_l4_filter *const l3_l4) { - struct core_local *l_core = (struct core_local *)osi_core; + nve32_t err; + nveu32_t filter_no = 0; + nveu32_t free_filter_no = UINT_MAX; + const struct core_local *l_core = (struct core_local *)(void *)osi_core; + const nveu32_t max_filter_no[2] = { + EQOS_MAX_L3_L4_FILTER - 1U, + OSI_MGBE_MAX_L3_L4_FILTER - 1U, + }; + nve32_t ret = -1; - if (validate_args(osi_core, l_core) < 0) { - return -1; + if (configure_l3l4_filter_valid_params(osi_core, l3_l4) < 0) { + /* parameter validation failed */ + goto exit_func; } - return l_core->ops_p->set_systime_to_mac(osi_core, sec, nsec); + /* search for a duplicate filter request or find for free index */ + err = l3l4_find_match(l_core, l3_l4, &filter_no, &free_filter_no, + max_filter_no[osi_core->mac]); + + if (l3_l4->filter_enb_dis == OSI_TRUE) { + if (err == 0) { + /* duplicate filter request */ + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_HW_FAIL), + ("L3L4: Failed: duplicate filter: "), (filter_no)); + goto exit_func; + } + + /* check free index */ + if (free_filter_no > max_filter_no[osi_core->mac]) { + /* no free entry found */ + OSI_CORE_INFO((osi_core->osd), (OSI_LOG_ARG_HW_FAIL), + ("L3L4: Failed: no free filter: "), (free_filter_no)); + goto exit_func; + } + filter_no = free_filter_no; + } else { + if (err < 0) { + /* no match found */ + OSI_CORE_INFO((osi_core->osd), (OSI_LOG_ARG_HW_FAIL), + ("L3L4: delete: no filter match: "), (filter_no)); + /* filter already deleted, return success */ + ret = 0; + goto exit_func; + } + } + +#if defined(L3L4_WILDCARD_FILTER) + /* setup l3l4 wildcard filter for l3l4 */ + l3l4_add_wildcard_filter(osi_core, max_filter_no[osi_core->mac]); + if (l_core->l3l4_wildcard_filter_configured != OSI_ENABLE) { + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_HW_FAIL), + ("L3L4: Rejected: wildcard is not enabled: "), (filter_no)); + goto exit_func; + } +#endif /* 
L3L4_WILDCARD_FILTER */ + + /* configure l3l4 filter */ + err = configure_l3l4_filter_helper(osi_core, filter_no, l3_l4); + if (err < 0) { + /* filter config failed */ + OSI_CORE_ERR((osi_core->osd), (OSI_LOG_ARG_HW_FAIL), + ("L3L4: configure_l3l4_filter_helper() failed"), (filter_no)); + goto exit_func; + } + + /* success */ + ret = 0; + +exit_func: + + return ret; } /** - * @brief div_u64 - Calls a function which returns quotient + * @brief osi_adjust_freq - Adjust frequency * - * @param[in] dividend: Dividend - * @param[in] divisor: Divisor + * @note + * Algorithm: + * - Adjust a drift of +/- comp nanoseconds per second. + * "Compensation" is the difference in frequency between + * the master and slave clocks in Parts Per Billion. * - * @pre MAC IP should be out of reset and need to be initialized as the - * requirements. + * @param[in] osi_core: OSI core private data structure. + * @param[in] ppb: Parts per Billion * + * @pre MAC should be init and started. see osi_start_mac() * * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * @returns Quotient + * Traceability Details: + * - SWUD_ID: ETHERNET_NVETHERNETRM_023 + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: No + * - Run time: Yes + * - De-initialization: No + * + * @retval 0 on success + * @retval -1 on failure. 
*/ -static inline nveu64_t div_u64(nveu64_t dividend, - nveu64_t divisor) +static nve32_t osi_adjust_freq(struct osi_core_priv_data *const osi_core, nve32_t ppb) { - nveu64_t remain; - - return div_u64_rem(dividend, divisor, &remain); -} - -nve32_t osi_adjust_freq(struct osi_core_priv_data *const osi_core, nve32_t ppb) -{ - struct core_local *l_core = (struct core_local *)osi_core; - nveu64_t adj; nveu64_t temp; nveu32_t diff = 0; @@ -698,10 +1165,6 @@ nve32_t osi_adjust_freq(struct osi_core_priv_data *const osi_core, nve32_t ppb) nve32_t ret = -1; nve32_t ppb1 = ppb; - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - addend = osi_core->default_addend; if (ppb1 < 0) { neg_adj = 1U; @@ -719,18 +1182,18 @@ nve32_t osi_adjust_freq(struct osi_core_priv_data *const osi_core, nve32_t ppb) if (temp < UINT_MAX) { diff = (nveu32_t)temp; } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, "temp > UINT_MAX\n", - 0ULL); - return ret; + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "temp > UINT_MAX\n", + (nvel64_t)temp); + goto fail; } if (neg_adj == 0U) { if (addend <= (UINT_MAX - diff)) { addend = (addend + diff); } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "addend > UINT_MAX\n", 0ULL); - return ret; + goto fail; } } else { if (addend > diff) { @@ -738,29 +1201,30 @@ nve32_t osi_adjust_freq(struct osi_core_priv_data *const osi_core, nve32_t ppb) } else if (addend < diff) { addend = diff - addend; } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "addend = diff\n", 0ULL); } } - return l_core->ops_p->config_addend(osi_core, addend); + ret = hw_config_addend(osi_core, addend); + +fail: + return ret; } -nve32_t osi_adjust_time(struct osi_core_priv_data *const osi_core, - nvel64_t nsec_delta) +static nve32_t osi_adjust_time(struct osi_core_priv_data *const osi_core, + nvel64_t nsec_delta) { - struct core_local *l_core = (struct core_local 
*)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; nveu32_t neg_adj = 0; nveu32_t sec = 0, nsec = 0; + nveu32_t cur_sec = 0, cur_nsec = 0; nveu64_t quotient; nveu64_t reminder = 0; nveu64_t udelta = 0; nve32_t ret = -1; nvel64_t nsec_delta1 = nsec_delta; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } + nvel64_t calculate; if (nsec_delta1 < 0) { neg_adj = 1; @@ -774,119 +1238,47 @@ nve32_t osi_adjust_time(struct osi_core_priv_data *const osi_core, if (quotient <= UINT_MAX) { sec = (nveu32_t)quotient; } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "quotient > UINT_MAX\n", 0ULL); - return ret; + goto fail; } if (reminder <= UINT_MAX) { nsec = (nveu32_t)reminder; } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "reminder > UINT_MAX\n", 0ULL); - return ret; + goto fail; } - return l_core->ops_p->adjust_mactime(osi_core, sec, nsec, neg_adj, - osi_core->ptp_config.one_nsec_accuracy); -} - -nve32_t osi_ptp_configuration(struct osi_core_priv_data *const osi_core, - const nveu32_t enable) -{ - struct core_local *l_core = (struct core_local *)osi_core; - nve32_t ret = 0; - nveu64_t temp = 0, temp1 = 0, temp2 = 0; - nveu64_t ssinc = 0; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - - if (enable == OSI_DISABLE) { - /* disable hw time stamping */ - /* Program MAC_Timestamp_Control Register */ - l_core->ops_p->config_tscr(osi_core, OSI_DISABLE); - /* Disable PTP RX Queue routing */ - ret = l_core->ops_p->config_ptp_rxq(osi_core, - osi_core->ptp_config.ptp_rx_queue, - OSI_DISABLE); - } else { - /* Program MAC_Timestamp_Control Register */ - l_core->ops_p->config_tscr(osi_core, - osi_core->ptp_config.ptp_filter); - - if (osi_core->pre_si == OSI_ENABLE) { - if (osi_core->mac == OSI_MAC_HW_MGBE) { - /* FIXME: Pass it from OSD */ - osi_core->ptp_config.ptp_clock = 78125000U; - 
osi_core->ptp_config.ptp_ref_clk_rate = - 78125000U; - } else { - /* FIXME: Pass it from OSD */ - osi_core->ptp_config.ptp_clock = 312500000U; - osi_core->ptp_config.ptp_ref_clk_rate = - 312500000U; - } - } - /* Program Sub Second Increment Register */ - l_core->ops_p->config_ssir(osi_core, - osi_core->ptp_config.ptp_clock); - - /* formula for calculating addend value is - * TSAR = (2^32 * 1000) / (ptp_ref_clk_rate in MHz * SSINC) - * 2^x * y == (y << x), hence - * 2^32 * 1000 == (1000 << 32) - * so addend = (2^32 * 1000)/(ptp_ref_clk_rate in MHZ * SSINC); - */ - if ((osi_core->pre_si == OSI_ENABLE) && - ((osi_core->mac == OSI_MAC_HW_MGBE) || - (osi_core->mac_ver <= OSI_EQOS_MAC_4_10))) { - ssinc = OSI_PTP_SSINC_16; - } else { - ssinc = OSI_PTP_SSINC_4; - if (osi_core->mac_ver == OSI_EQOS_MAC_5_30) { - ssinc = OSI_PTP_SSINC_6; - } - } - - temp = ((nveu64_t)1000 << 32); - temp = (nveu64_t)temp * 1000000U; - - temp1 = div_u64(temp, - (nveu64_t)osi_core->ptp_config.ptp_ref_clk_rate); - - temp2 = div_u64(temp1, (nveu64_t)ssinc); + common_get_systime_from_mac(osi_core->base, + osi_core->mac, &cur_sec, &cur_nsec); + calculate = ((nvel64_t)cur_sec * OSI_NSEC_PER_SEC_SIGNED) + (nvel64_t)cur_nsec; - if (temp2 < UINT_MAX) { - osi_core->default_addend = (nveu32_t)temp2; - } else { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "core: temp2 >= UINT_MAX\n", 0ULL); - return -1; + if (neg_adj == 1U) { + if ((calculate + nsec_delta) < 0LL) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Wrong delta, put time in -ve\n", 0ULL); + ret = -1; + goto fail; } - - /* Program addend value */ - ret = l_core->ops_p->config_addend(osi_core, - osi_core->default_addend); - - /* Set current time */ - if (ret == 0) { - ret = l_core->ops_p->set_systime_to_mac(osi_core, - osi_core->ptp_config.sec, - osi_core->ptp_config.nsec); - if (ret == 0) { - /* Enable PTP RX Queue routing */ - ret = l_core->ops_p->config_ptp_rxq(osi_core, - osi_core->ptp_config.ptp_rx_queue, - OSI_ENABLE); - } + } 
else { + /* Addition of 2 sec for compensate Max nanosec factors*/ + if (cur_sec > (UINT_MAX - sec - 2U)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Not Supported sec beyond UINT_max\n", 0ULL); + ret = -1; + goto fail; } } + ret = l_core->ops_p->adjust_mactime(osi_core, sec, nsec, neg_adj, + osi_core->ptp_config.one_nsec_accuracy); +fail: return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief rxq_route_config - Enable PTP RX packets routing * @@ -904,7 +1296,7 @@ nve32_t osi_ptp_configuration(struct osi_core_priv_data *const osi_core, static nve32_t rxq_route_config(struct osi_core_priv_data *const osi_core, const struct osi_rxq_route *rxq_route) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; if (rxq_route->route_type != OSI_RXQ_ROUTE_PTP) { OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, @@ -918,96 +1310,6 @@ static nve32_t rxq_route_config(struct osi_core_priv_data *const osi_core, rxq_route->enable); } -nve32_t osi_read_mmc(struct osi_core_priv_data *const osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - - l_core->ops_p->read_mmc(osi_core); - - return 0; -} - -nve32_t osi_get_mac_version(struct osi_core_priv_data *const osi_core, - nveu32_t *mac_ver) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - - if (mac_ver == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "mac_ver is NULL\n", 0ULL); - return -1; - } - - *mac_ver = ((l_core->ops_p->read_reg(osi_core, (nve32_t)MAC_VERSION)) & - MAC_VERSION_SNVER_MASK); - - if (validate_mac_ver_update_chans(*mac_ver, &l_core->max_chans) == 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "Invalid MAC version\n", (nveu64_t)*mac_ver) - return -1; - } - - return 0; -} - -#ifndef OSI_STRIPPED_LIB -/** - * @brief validate_core_regs - Read-validate HW 
registers for func safety. - * - * @note - * Algorithm: - * - Reads pre-configured list of MAC/MTL configuration registers - * and compares with last written value for any modifications. - * - * @param[in] osi_core: OSI core private data structure. - * - * @pre - * - MAC has to be out of reset. - * - osi_hal_hw_core_init has to be called. Internally this would initialize - * the safety_config (see osi_core_priv_data) based on MAC version and - * which specific registers needs to be validated periodically. - * - Invoke this call if (osi_core_priv_data->safety_config != OSI_NULL) - * - * @note - * Traceability Details: - * - * @note - * Classification: - * - Interrupt: No - * - Signal handler: No - * - Thread safe: No - * - Required Privileges: None - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t validate_core_regs(struct osi_core_priv_data *const osi_core) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (osi_core->safety_config == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "CORE: Safety config is NULL\n", 0ULL); - return -1; - } - - return l_core->ops_p->validate_regs(osi_core); -} - /** * @brief vlan_id_update - invoke osi call to update VLAN ID * @@ -1042,9 +1344,9 @@ static nve32_t validate_core_regs(struct osi_core_priv_data *const osi_core) static nve32_t vlan_id_update(struct osi_core_priv_data *const osi_core, const nveu32_t vid) { - struct core_local *l_core = (struct core_local *)osi_core; - unsigned int action = vid & VLAN_ACTION_MASK; - unsigned short vlan_id = vid & VLAN_VID_MASK; + struct core_local *const l_core = (struct core_local *)(void *)osi_core; + nveu32_t action = vid & VLAN_ACTION_MASK; + nveu16_t vlan_id = (nveu16_t)(vid & VLAN_VID_MASK); if ((osi_core->mac_ver == OSI_EQOS_MAC_4_10) || (osi_core->mac_ver == OSI_EQOS_MAC_5_00)) { @@ -1055,7 +1357,7 @@ static nve32_t 
vlan_id_update(struct osi_core_priv_data *const osi_core, if (((action != OSI_VLAN_ACTION_ADD) && (action != OSI_VLAN_ACTION_DEL)) || (vlan_id >= VLAN_NUM_VID)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: Invalid action/vlan_id\n", 0ULL); /* Unsupported action */ return -1; @@ -1101,12 +1403,12 @@ static nve32_t vlan_id_update(struct osi_core_priv_data *const osi_core, static nve32_t conf_eee(struct osi_core_priv_data *const osi_core, nveu32_t tx_lpi_enabled, nveu32_t tx_lpi_timer) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; if ((tx_lpi_timer >= OSI_MAX_TX_LPI_TIMER) || (tx_lpi_timer <= OSI_MIN_TX_LPI_TIMER) || ((tx_lpi_timer % OSI_MIN_TX_LPI_TIMER) != OSI_NONE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Invalid Tx LPI timer value\n", (nveul64_t)tx_lpi_timer); return -1; @@ -1118,14 +1420,19 @@ static nve32_t conf_eee(struct osi_core_priv_data *const osi_core, } /** - * @brief configure_frp - Configure the FRP offload entry in the - * Instruction Table. + * @brief config_arp_offload - Configure ARP offload in MAC. + * + * @note + * Algorithm: + * - Invokes EQOS config ARP offload routine. * * @param[in] osi_core: OSI core private data structure. - * @param[in] cmd: FRP command data structure. + * @param[in] flags: Enable/disable flag. + * @param[in] ip_addr: Char array representation of IP address * * @pre - * - MAC and PHY should be init and started. see osi_start_mac() + * - MAC should be init and started. see osi_start_mac() + * - Valid 4 byte IP address as argument ip_addr * * @note * Traceability Details: @@ -1146,42 +1453,38 @@ static nve32_t conf_eee(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static int configure_frp(struct osi_core_priv_data *const osi_core, - struct osi_core_frp_cmd *const cmd) +static nve32_t conf_arp_offload(struct osi_core_priv_data *const osi_core, + const nveu32_t flags, + const nveu8_t *ip_addr) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; - if (cmd == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "Invalid argment\n", OSI_NONE); + if (ip_addr == OSI_NULL) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: ip_addr is NULL\n", 0ULL); return -1; } - /* Check for supported MAC version */ - if ((osi_core->mac == OSI_MAC_HW_EQOS) && - (osi_core->mac_ver < OSI_EQOS_MAC_5_10)) { - OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, - "MAC doesn't support FRP\n", OSI_NONE); + if ((flags != OSI_ENABLE) && (flags != OSI_DISABLE)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid ARP offload enable/disable flag\n", 0ULL); return -1; } - return setup_frp(osi_core, l_core->ops_p, cmd); + return l_core->ops_p->config_arp_offload(osi_core, flags, ip_addr); } /** - * @brief config_arp_offload - Configure ARP offload in MAC. + * @brief conf_mac_loopback - Configure MAC loopback * * @note * Algorithm: - * - Invokes EQOS config ARP offload routine. + * - Configure the MAC to support the loopback. * * @param[in] osi_core: OSI core private data structure. - * @param[in] flags: Enable/disable flag. - * @param[in] ip_addr: Char array representation of IP address + * @param[in] lb_mode: Enable or disable MAC loopback * - * @pre - * - MAC should be init and started. see osi_start_mac() - * - Valid 4 byte IP address as argument ip_addr + * @pre MAC should be init and started. see osi_start_mac() * * @note * Traceability Details: @@ -1201,39 +1504,32 @@ static int configure_frp(struct osi_core_priv_data *const osi_core, * * @retval 0 on success * @retval -1 on failure. 
- */ -static nve32_t conf_arp_offload(struct osi_core_priv_data *const osi_core, - const nveu32_t flags, - const nveu8_t *ip_addr) + */ +static nve32_t conf_mac_loopback(struct osi_core_priv_data *const osi_core, + const nveu32_t lb_mode) { - struct core_local *l_core = (struct core_local *)osi_core; - - if (ip_addr == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "CORE: ip_addr is NULL\n", 0ULL); - return -1; - } + struct core_local *l_core = (struct core_local *)(void *)osi_core; - if ((flags != OSI_ENABLE) && (flags != OSI_DISABLE)) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "Invalid ARP offload enable/disable flag\n", 0ULL); + /* don't allow only if loopback mode is other than 0 or 1 */ + if ((lb_mode != OSI_ENABLE) && (lb_mode != OSI_DISABLE)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "Invalid loopback mode\n", 0ULL); return -1; } - return l_core->ops_p->config_arp_offload(osi_core, flags, ip_addr); + return l_core->ops_p->config_mac_loopback(osi_core, lb_mode); } +#endif /* !OSI_STRIPPED_LIB */ /** - * @brief conf_mac_loopback - Configure MAC loopback - * - * @note - * Algorithm: - * - Configure the MAC to support the loopback. + * @brief configure_frp - Configure the FRP offload entry in the + * Instruction Table. * * @param[in] osi_core: OSI core private data structure. - * @param[in] lb_mode: Enable or disable MAC loopback + * @param[in] cmd: FRP command data structure. * - * @pre MAC should be init and started. see osi_start_mac() + * @pre + * - MAC and PHY should be init and started. see osi_start_mac() * * @note * Traceability Details: @@ -1254,21 +1550,32 @@ static nve32_t conf_arp_offload(struct osi_core_priv_data *const osi_core, * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t conf_mac_loopback(struct osi_core_priv_data *const osi_core, - const nveu32_t lb_mode) +static nve32_t configure_frp(struct osi_core_priv_data *const osi_core, + struct osi_core_frp_cmd *const cmd) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + nve32_t ret; - /* don't allow only if loopback mode is other than 0 or 1 */ - if (lb_mode != OSI_ENABLE && lb_mode != OSI_DISABLE) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "Invalid loopback mode\n", 0ULL); - return -1; + if (cmd == OSI_NULL) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "FRP command invalid\n", 0ULL); + ret = -1; + goto done; } - return l_core->ops_p->config_mac_loopback(osi_core, lb_mode); + /* Check for supported MAC version */ + if ((osi_core->mac == OSI_MAC_HW_EQOS) && + (osi_core->mac_ver < OSI_EQOS_MAC_5_30)) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "MAC doesn't support FRP\n", OSI_NONE); + ret = -1; + goto done; + } + + ret = setup_frp(osi_core, l_core->ops_p, cmd); +done: + return ret; } -#endif /* !OSI_STRIPPED_LIB */ /** * @brief config_est - Read Setting for GCL from input and update @@ -1313,23 +1620,28 @@ static nve32_t conf_mac_loopback(struct osi_core_priv_data *const osi_core, static nve32_t config_est(struct osi_core_priv_data *osi_core, struct osi_est_config *est) { - struct core_local *l_core = (struct core_local *)osi_core; + nve32_t ret; if (est == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "EST data is NULL", 0ULL); - return -1; + ret = -1; + goto done; } if ((osi_core->flow_ctrl & OSI_FLOW_CTRL_TX) == OSI_FLOW_CTRL_TX) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "TX Flow control enabled, please disable it", 0ULL); - return -1; + ret = -1; + goto done; } - return l_core->ops_p->hw_config_est(osi_core, est); + ret = 
hw_config_est(osi_core, est); + +done: + return ret; } /** @@ -1368,15 +1680,19 @@ static nve32_t config_est(struct osi_core_priv_data *osi_core, static nve32_t config_fpe(struct osi_core_priv_data *osi_core, struct osi_fpe_config *fpe) { - struct core_local *l_core = (struct core_local *)osi_core; + nve32_t ret; if (fpe == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "FPE data is NULL", 0ULL); - return -1; + ret = -1; + goto done; } - return l_core->ops_p->hw_config_fpe(osi_core, fpe); + ret = hw_config_fpe(osi_core, fpe); + +done: + return ret; } /** @@ -1393,7 +1709,7 @@ static nve32_t config_fpe(struct osi_core_priv_data *osi_core, static inline void free_tx_ts(struct osi_core_priv_data *osi_core, nveu32_t chan) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; struct osi_core_tx_ts *head = &l_core->tx_ts_head; struct osi_core_tx_ts *temp = l_core->tx_ts_head.next; nveu32_t count = 0U; @@ -1410,6 +1726,29 @@ static inline void free_tx_ts(struct osi_core_priv_data *osi_core, } } +/** + * @brief Return absolute difference + * Algorithm: + * - calculate absolute positive difference + * + * @param[in] a - First input argument + * @param[in] b - Second input argument + * + * @retval absolute difference + */ +static inline nveul64_t eth_abs(nveul64_t a, nveul64_t b) +{ + nveul64_t temp = 0ULL; + + if (a > b) { + temp = (a - b); + } else { + temp = (b - a); + } + + return temp; +} + /** * @brief Parses internal ts structure array and update time stamp if packet * id matches. 
@@ -1427,24 +1766,48 @@ static inline void free_tx_ts(struct osi_core_priv_data *osi_core, static inline nve32_t get_tx_ts(struct osi_core_priv_data *osi_core, struct osi_core_tx_ts *ts) { - struct core_local *l_core = (struct core_local *)osi_core; + struct core_local *l_core = (struct core_local *)(void *)osi_core; struct osi_core_tx_ts *temp = l_core->tx_ts_head.next; - struct osi_core_tx_ts *head = &l_core->tx_ts_head; + struct osi_core_tx_ts const *head = &l_core->tx_ts_head; nve32_t ret = -1; nveu32_t count = 0U; + nveu32_t nsec, sec, temp_nsec; + nveul64_t temp_val = 0ULL; + nveul64_t ts_val = 0ULL; + + common_get_systime_from_mac(osi_core->base, osi_core->mac, &sec, &nsec); + ts_val = (sec * OSI_NSEC_PER_SEC) + nsec; if (__sync_fetch_and_add(&l_core->ts_lock, 1) == 1U) { /* mask return as initial value is returned always */ (void)__sync_fetch_and_sub(&l_core->ts_lock, 1); - osi_core->xstats.ts_lock_del_fail = +#ifndef OSI_STRIPPED_LIB + osi_core->stats.ts_lock_del_fail = osi_update_stats_counter( - osi_core->xstats.ts_lock_del_fail, 1U); + osi_core->stats.ts_lock_del_fail, 1U); +#endif goto done; } while ((temp != head) && (count < MAX_TX_TS_CNT)) { - if ((temp->pkt_id == ts->pkt_id) && + temp_nsec = temp->nsec & ETHER_NSEC_MASK; + temp_val = (temp->sec * OSI_NSEC_PER_SEC) + temp_nsec; + + if ((eth_abs(ts_val, temp_val) > OSI_NSEC_PER_SEC) && (temp->in_use != OSI_NONE)) { + /* remove old node from the link */ + temp->next->prev = temp->prev; + temp->prev->next = temp->next; + /* Clear in_use fields */ + temp->in_use = OSI_DISABLE; + OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_INVALID, + "Removing stale TS from queue pkt_id\n", + (nveul64_t)temp->pkt_id); + count++; + temp = temp->next; + continue; + } else if ((temp->pkt_id == ts->pkt_id) && + (temp->in_use != OSI_NONE)) { ts->sec = temp->sec; ts->nsec = temp->nsec; /* remove temp node from the link */ @@ -1454,7 +1817,10 @@ static inline nve32_t get_tx_ts(struct osi_core_priv_data *osi_core, temp->in_use = 
OSI_DISABLE; ret = 0; break; + } else { + /* empty case */ } + count++; temp = temp->next; } @@ -1465,246 +1831,642 @@ static inline nve32_t get_tx_ts(struct osi_core_priv_data *osi_core, return ret; } -#if DRIFT_CAL /** - * @brief read time counters from HW register - * + * @brief calculate time drift between primary and secondary + * interface and update current time. * Algorithm: - * - read HW time counters and take care of roll-over + * - Get drift using last difference = 0 and + * current differance as MGBE time - EQOS time + * drift = current differance with which EQOS should + * update. + * + * @param[in] osi_core: OSI core data structure for primary interface. + * @param[in] sec_osi_core: OSI core data structure for seconday interface. + * @param[out] primary_time: primary interface time pointer + * @param[out] secondary_time: Secondary interface time pointer * - * @param[in] addr: base address - * @param[in] mac: IP type - * @param[out] sec: sec counter - * @param[out] nsec: nsec counter + * @retval calculated drift value */ -static void read_sec_ns(void *addr, nveu32_t mac, - nveu32_t *sec, - nveu32_t *nsec) +static inline nvel64_t dirft_calculation(struct osi_core_priv_data *const osi_core, + struct osi_core_priv_data *const sec_osi_core, + nvel64_t *primary_time, + nvel64_t *secondary_time) { - nveu32_t ns1, ns2; - nveu32_t time_reg_offset[][2] = {{EQOS_SEC_OFFSET, EQOS_NSEC_OFFSET}, - {MGBE_SEC_OFFSET, MGBE_NSEC_OFFSET}}; + nve32_t ret; + nveu32_t sec = 0x0; + nveu32_t nsec = 0x0; + nveu32_t secondary_sec = 0x0; + nveu32_t secondary_nsec = 0x0; + nvel64_t val = 0LL; + nveul64_t temp = 0x0U; + nveul64_t time1 = 0x0U; + nveul64_t time2 = 0x0U; + struct osi_core_ptp_tsc_data ptp_tsc1; + struct osi_core_ptp_tsc_data ptp_tsc2; + + ret = hw_ptp_tsc_capture(osi_core, &ptp_tsc1); + if (ret != 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: TSC PTP capture failed for primary\n", 0ULL); + goto fail; + } + + ret = hw_ptp_tsc_capture(sec_osi_core, 
&ptp_tsc2); + if (ret != 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: TSC PTP capture failed for secondary\n", 0ULL); + goto fail; + } - ns1 = osi_readl((nveu8_t *)addr + time_reg_offset[mac][1]); - ns1 = (ns1 & ETHER_NSEC_MASK); + time1 = ((nveul64_t)((nveul64_t)ptp_tsc1.tsc_high_bits << 32) + + (nveul64_t)ptp_tsc1.tsc_low_bits); + sec = ptp_tsc1.ptp_high_bits; + nsec = ptp_tsc1.ptp_low_bits; + if ((OSI_LLONG_MAX - (nvel64_t)nsec) > ((nvel64_t)sec * OSI_NSEC_PER_SEC_SIGNED)) { + *primary_time = ((nvel64_t)sec * OSI_NSEC_PER_SEC_SIGNED) + (nvel64_t)nsec; + } else { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: Negative primary PTP time\n", 0ULL); + goto fail; + } - *sec = osi_readl((nveu8_t *)addr + time_reg_offset[mac][0]); + time2 = ((nveul64_t)((nveul64_t)ptp_tsc2.tsc_high_bits << 32) + + (nveul64_t)ptp_tsc2.tsc_low_bits); + secondary_sec = ptp_tsc2.ptp_high_bits; + secondary_nsec = ptp_tsc2.ptp_low_bits; - ns2 = osi_readl((nveu8_t *)addr + time_reg_offset[mac][1]); - ns2 = (ns2 & ETHER_NSEC_MASK); + if ((OSI_LLONG_MAX - (nvel64_t)secondary_nsec) > + ((nvel64_t)secondary_sec * OSI_NSEC_PER_SEC_SIGNED)) { + *secondary_time = ((nvel64_t)secondary_sec * OSI_NSEC_PER_SEC_SIGNED) + + (nvel64_t)secondary_nsec; + } else { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: Negative secondary PTP time\n", 0ULL); + goto fail; + } - /* if ns1 is greater than ns2, it means nsec counter rollover - * happened. 
In that case read the updated sec counter again + if (time2 > time1) { + temp = time2 - time1; + if ((OSI_LLONG_MAX - (nvel64_t)temp) > *secondary_time) { + *secondary_time -= (nvel64_t)temp; + } else { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: sec time crossing limit\n", 0ULL); + goto fail; + } + } else if (time1 >= time2) { + temp = time1 - time2; + if ((OSI_LLONG_MAX - (nvel64_t)temp) > *secondary_time) { + *secondary_time += (nvel64_t)temp; + } else { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: sec time crossing limit\n", 0ULL); + goto fail; + } + } else { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: wrong drift\n", 0ULL); + goto fail; + } + /* 0 is lowest possible valid time value which represent + * 1 Jan, 1970 */ - if (ns1 >= ns2) { - *sec = osi_readl((nveu8_t *)addr + time_reg_offset[mac][0]); - *nsec = ns2; + if ((*primary_time >= 0) && (*secondary_time >= 0)) { + val = (*primary_time - *secondary_time); } else { - *nsec = ns1; + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: negative time\n", 0ULL); + goto fail; } + +fail: + return val; } /** - * @brief calculate time drift between primary and secondary - * interface. + * @brief calculate frequency adjustment between primary and secondary + * controller. * Algorithm: - * - Get drift using last difference = 0 and - * current differance as MGBE time - EQOS time - * drift = current differance with which EQOS should - * update. + * - Convert Offset between primary and secondary interface to + * frequency adjustment value. 
* - * @param[in] sec: primary interface sec counter - * @param[in] nsec: primary interface nsec counter - * @param[in] secondary_sec: Secondary interface sec counter - * @param[in] secondary_nsec: Secondary interface nsec counter + * @param[in] sec_osi_core: secondary interface osi core pointer + * @param[in] offset: offset btween primary and secondary interface + * @param[in] secondary_time: Secondary interface time in ns * - * @retval calculated drift value + * @retval calculated frequency adjustment value in ppb */ -static inline nvel64_t dirft_calculation(nveu32_t sec, nveu32_t nsec, - nveu32_t secondary_sec, - nveu32_t secondary_nsec) +static inline nve32_t freq_offset_calculate(struct osi_core_priv_data *sec_osi_core, + nvel64_t offset, + nvel64_t secondary_time) { - nvel64_t val = 0LL; + struct core_ptp_servo *s; + struct core_local *secondary_osi_lcore = (struct core_local *)(void *)sec_osi_core; + nvel64_t ki_term, ppb = 0; + nvel64_t cofficient; + + s = &secondary_osi_lcore->serv; + ppb = s->last_ppb; + + /* if drift is too big in positive / negative don't take any action, + * it should be corrected with adjust time + * threshold value 1 sec + */ + if ((offset >= 1000000000LL) || (offset <= -1000000000LL)) { + s->count = SERVO_STATS_0; /* JUMP */ + s->drift = 0; + s->last_ppb = 0; + goto fail; + } + + switch (s->count) { + case SERVO_STATS_0: + s->offset[0] = offset; + s->local[0] = secondary_time; + s->count = SERVO_STATS_1; + break; + + case SERVO_STATS_1: + s->offset[1] = offset; + s->local[1] = secondary_time; + + /* Make sure the first sample is older than the second. */ + if (s->local[0] >= s->local[1]) { + s->offset[0] = s->offset[1]; + s->local[0] = s->local[1]; + s->count = SERVO_STATS_0; + break; + } + + /* Adjust drift by the measured frequency offset. 
*/ + cofficient = (1000000000LL - s->drift) / (s->local[1] - s->local[0]); + if ((cofficient == 0) || + (((cofficient < 0) && (s->offset[1] < 0)) && + ((OSI_LLONG_MAX / cofficient) < s->offset[1])) || + ((cofficient < 0) && ((-OSI_LLONG_MAX / cofficient) > s->offset[1])) || + ((s->offset[1] < 0) && ((-OSI_LLONG_MAX / cofficient) > s->offset[1]))) { + /* do nothing */ + } else { + + if (((s->drift >= 0) && ((OSI_LLONG_MAX - s->drift) < (cofficient * s->offset[1]))) || + ((s->drift < 0) && ((-OSI_LLONG_MAX - s->drift) > (cofficient * s->offset[1])))) { + /* Do nothing */ + } else { + s->drift += cofficient * s->offset[1]; + } + } + /* update this with constant */ + if (s->drift < MAX_FREQ_NEG) { + s->drift = MAX_FREQ_NEG; + } else if (s->drift > MAX_FREQ_POS) { + s->drift = MAX_FREQ_POS; + } else { + /* Do Nothing */ + } + + ppb = s->drift; + s->count = SERVO_STATS_2; + s->offset[0] = s->offset[1]; + s->local[0] = s->local[1]; + break; + + case SERVO_STATS_2: + s->offset[1] = offset; + s->local[1] = secondary_time; + if (s->local[0] >= s->local[1]) { + s->offset[0] = s->offset[1]; + s->local[0] = s->local[1]; + s->count = SERVO_STATS_0; + break; + } + + cofficient = (1000000000LL) / (s->local[1] - s->local[0]); + + if ((cofficient != 0) && (offset < 0) && + (((offset / WEIGHT_BY_10) < (-OSI_LLONG_MAX / (s->const_i * cofficient))) || + ((offset / WEIGHT_BY_10) < (-OSI_LLONG_MAX / (s->const_p * cofficient))))) { + s->count = SERVO_STATS_0; + break; + } + + if ((cofficient != 0) && (offset > 0) && + (((offset / WEIGHT_BY_10) > (OSI_LLONG_MAX / (cofficient * s->const_i))) || + ((offset / WEIGHT_BY_10) > (OSI_LLONG_MAX / (cofficient * s->const_p))))) { + s->count = SERVO_STATS_0; + break; + } + + /* calculate ppb */ + ki_term = ((s->const_i * cofficient * offset) / WEIGHT_BY_10); + ppb = (s->const_p * cofficient * offset / WEIGHT_BY_10) + s->drift + + ki_term; + + /* FIXME tune cofficients */ + if (ppb < MAX_FREQ_NEG) { + ppb = MAX_FREQ_NEG; + } else if (ppb > 
MAX_FREQ_POS) { + ppb = MAX_FREQ_POS; + } else { + if (((s->drift >= 0) && ((OSI_LLONG_MAX - s->drift) < ki_term)) || + ((s->drift < 0) && ((-OSI_LLONG_MAX - s->drift) > ki_term))) { + } else { + + s->drift += ki_term; + } + s->offset[0] = s->offset[1]; + s->local[0] = s->local[1]; + } + break; + default: + break; + } + + s->last_ppb = ppb; + +fail: + if ((ppb > INT_MAX) || (ppb < -INT_MAX)) { + ppb = 0LL; + } + + return (nve32_t)ppb; +} + +static void cfg_l3_l4_filter(struct core_local *l_core) +{ + nveu32_t i = 0U; + + for (i = 0U; i < OSI_MGBE_MAX_L3_L4_FILTER; i++) { + if (l_core->cfg.l3_l4[i].filter_enb_dis == OSI_FALSE) { + /* filter not enabled */ + continue; + } + + (void)configure_l3l4_filter_helper( + (struct osi_core_priv_data *)(void *)l_core, + i, &l_core->cfg.l3_l4[i]); + +#if defined(L3L4_WILDCARD_FILTER) + if (i == 0U) { + /* first filter supposed to be tcp wildcard filter */ + l_core->l3l4_wildcard_filter_configured = OSI_ENABLE; + } +#endif /* L3L4_WILDCARD_FILTER */ + } +} + +static void cfg_l2_filter(struct core_local *l_core) +{ + nveu32_t i; + + (void)osi_l2_filter((struct osi_core_priv_data *)(void *)l_core, + &l_core->cfg.l2_filter); + + for (i = 0U; i < EQOS_MAX_MAC_ADDRESS_FILTER; i++) { + if (l_core->cfg.l2[i].used == OSI_DISABLE) { + continue; + } + + (void)osi_l2_filter((struct osi_core_priv_data *)(void *)l_core, + &l_core->cfg.l2[i].filter); + } +} + +static void cfg_rxcsum(struct core_local *l_core) +{ + (void)hw_config_rxcsum_offload((struct osi_core_priv_data *)(void *)l_core, + l_core->cfg.rxcsum); +} + +#ifndef OSI_STRIPPED_LIB +static void cfg_vlan(struct core_local *l_core) +{ + nveu32_t i; + + for (i = 0U; i < VLAN_NUM_VID; i++) { + if (l_core->cfg.vlan[i].used == OSI_DISABLE) { + continue; + } - val = (nvel64_t)sec - (nvel64_t)secondary_sec; - val = (nvel64_t)(val * 1000000000LL); - val += (nvel64_t)nsec - (nvel64_t)secondary_nsec; + (void)vlan_id_update((struct osi_core_priv_data *)(void *)l_core, + (l_core->cfg.vlan[i].vid 
| OSI_VLAN_ACTION_ADD)); + } +} - return val; +static void cfg_fc(struct core_local *l_core) +{ + (void)l_core->ops_p->config_flow_control((struct osi_core_priv_data *)(void *)l_core, + l_core->cfg.flow_ctrl); } -/** - * @brief calculate frequency adjustment between primary and secondary - * controller. - * Algorithm: - * - Convert Offset between primary and secondary interface to - * frequency adjustment value. - * - * @param[in] sec_osi_core: secondary interface osi core pointer - * @param[in] offset: offset btween primary and secondary interface - * @param[in] secondary_time: Secondary interface time in ns - * - * @retval calculated frequency adjustment value in ppb - */ -static inline nve32_t freq_offset_calculate(struct osi_core_priv_data *sec_osi_core, - nvel64_t offset, nvel64_t secondary_time) +static void cfg_eee(struct core_local *l_core) { - struct core_ptp_servo *s; - struct core_local *secondary_osi_lcore = (struct core_local *)sec_osi_core; - nvel64_t ki_term, ppb = 0; - nvel64_t cofficient; + (void)conf_eee((struct osi_core_priv_data *)(void *)l_core, + l_core->cfg.tx_lpi_enabled, + l_core->cfg.tx_lpi_timer); +} +#endif /* !OSI_STRIPPED_LIB */ - s = &secondary_osi_lcore->serv; - ppb = s->last_ppb; +static void cfg_avb(struct core_local *l_core) +{ + nveu32_t i; - /* if drift is too big in positive / negative don't take any action, - * it should be corrected with adjust time - * threshold value 1 sec - */ - if (offset >= 1000000000 || offset <= -1000000000) { - s->count = SERVO_STATS_0; /* JUMP */ - return (nve32_t) s->last_ppb; + for (i = 0U; i < OSI_MGBE_MAX_NUM_QUEUES; i++) { + if (l_core->cfg.avb[i].used == OSI_DISABLE) { + continue; + } + + (void)l_core->ops_p->set_avb_algorithm((struct osi_core_priv_data *)(void *)l_core, + &l_core->cfg.avb[i].avb_info); } +} - switch (s->count) { - case SERVO_STATS_0: - s->offset[0] = offset; - s->local[0] = secondary_time; - s->count = SERVO_STATS_1; - break; +static void cfg_est(struct core_local *l_core) +{ 
+ (void)config_est((struct osi_core_priv_data *)(void *)l_core, + &l_core->cfg.est); +} - case SERVO_STATS_1: - s->offset[1] = offset; - s->local[1] = secondary_time; +static void cfg_fpe(struct core_local *l_core) +{ + (void)config_fpe((struct osi_core_priv_data *)(void *)l_core, + &l_core->cfg.fpe); +} - /* Make sure the first sample is older than the second. */ - if (s->local[0] >= s->local[1]) { - s->count = SERVO_STATS_0; - break; - } +static void cfg_ptp(struct core_local *l_core) +{ + struct osi_core_priv_data *osi_core = (struct osi_core_priv_data *)(void *)l_core; + struct osi_ioctl ioctl_data = {}; - /* Adjust drift by the measured frequency offset. */ - cofficient = (1000000000LL - s->drift) / (s->local[1] - s->local[0]); - s->drift += cofficient * s->offset[1]; + ioctl_data.arg1_u32 = l_core->cfg.ptp; + ioctl_data.cmd = OSI_CMD_CONFIG_PTP; - /* update this with constant */ - if (s->drift < -MAX_FREQ) { - s->drift = -MAX_FREQ; - } else if (s->drift > MAX_FREQ) { - s->drift = MAX_FREQ; - } + (void)osi_handle_ioctl(osi_core, &ioctl_data); +} - ppb = s->drift; - s->count = SERVO_STATS_2; - s->offset[0] = s->offset[1]; - s->local[0] = s->local[1]; - break; +static void cfg_frp(struct core_local *l_core) +{ + struct osi_core_priv_data *osi_core = (struct osi_core_priv_data *)(void *)l_core; - case SERVO_STATS_2: - s->offset[1] = offset; - s->local[1] = secondary_time; - cofficient = (1000000000LL) / (s->local[1] - s->local[0]); - /* calculate ppb */ - ki_term = (s->const_i * cofficient * offset * WEIGHT_BY_10) / (100);//weight; - ppb = (s->const_p * cofficient * offset * WEIGHT_BY_10) / (100) + s->drift + - ki_term; + (void)frp_hw_write(osi_core, l_core->ops_p); +} - /* FIXME tune cofficients */ - if (ppb < -MAX_FREQ) { - ppb = -MAX_FREQ; - } else if (ppb > MAX_FREQ) { - ppb = MAX_FREQ; - } else { - s->drift += ki_term; +static void apply_dynamic_cfg(struct osi_core_priv_data *osi_core) +{ + struct core_local *l_core = (struct core_local *)(void *)osi_core; + 
typedef void (*cfg_fn)(struct core_local *local_core); + const cfg_fn fn[11] = { + [DYNAMIC_CFG_L3_L4_IDX] = cfg_l3_l4_filter, + [DYNAMIC_CFG_L2_IDX] = cfg_l2_filter, + [DYNAMIC_CFG_RXCSUM_IDX] = cfg_rxcsum, +#ifndef OSI_STRIPPED_LIB + [DYNAMIC_CFG_VLAN_IDX] = cfg_vlan, + [DYNAMIC_CFG_FC_IDX] = cfg_fc, + [DYNAMIC_CFG_EEE_IDX] = cfg_eee, +#endif /* !OSI_STRIPPED_LIB */ + [DYNAMIC_CFG_AVB_IDX] = cfg_avb, + [DYNAMIC_CFG_EST_IDX] = cfg_est, + [DYNAMIC_CFG_FPE_IDX] = cfg_fpe, + [DYNAMIC_CFG_PTP_IDX] = cfg_ptp, + [DYNAMIC_CFG_FRP_IDX] = cfg_frp + }; + nveu32_t flags = l_core->cfg.flags; + nveu32_t i = 0U; + + while (flags > 0U) { + if ((flags & OSI_ENABLE) == OSI_ENABLE) { + fn[i](l_core); } - s->offset[0] = s->offset[1]; - s->local[0] = s->local[1]; - break; - default: - break; - } - s->last_ppb = ppb; + flags = flags >> 1U; + update_counter_u(&i, 1U); + } +} - return (nve32_t)ppb; +static void store_l2_filter(struct osi_core_priv_data *osi_core, + struct osi_filter *filter) +{ + struct core_local *l_core = (struct core_local *)(void *)osi_core; + + if ((filter->oper_mode & OSI_OPER_ADDR_UPDATE) == OSI_OPER_ADDR_UPDATE) { + (void)osi_memcpy(&l_core->cfg.l2[filter->index].filter, filter, + sizeof(struct osi_filter)); + l_core->cfg.l2[filter->index].used = OSI_ENABLE; + } else if ((filter->oper_mode & OSI_OPER_ADDR_DEL) == OSI_OPER_ADDR_DEL) { + l_core->cfg.l2[filter->index].used = OSI_DISABLE; + } else { + (void)osi_memcpy(&l_core->cfg.l2_filter, filter, + sizeof(struct osi_filter)); + } } -#endif -nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, - struct osi_ioctl *data) +/** + * @brief osi_hal_handle_ioctl - HW function API to handle runtime command + * + * @note + * Algorithm: + * - Handle runtime commands to OSI + * - OSI_CMD_MDC_CONFIG + * Derive MDC clock based on provided AXI_CBB clk + * arg1_u32 - CSR (AXI CBB) clock rate. 
+ * - OSI_CMD_RESTORE_REGISTER + * Restore backup of MAC MMIO address space + * - OSI_CMD_POLL_FOR_MAC_RST + * Poll Software reset bit in MAC HW + * - OSI_CMD_COMMON_ISR + * Common ISR handler + * - OSI_CMD_PAD_CALIBRATION + * PAD calibration + * - OSI_CMD_READ_MMC + * invoke function to read actual registers and update + * structure variable mmc + * - OSI_CMD_GET_MAC_VER + * Reading MAC version + * arg1_u32 - holds mac version + * - OSI_CMD_VALIDATE_CORE_REG + * Read-validate HW registers for func safety + * - OSI_CMD_RESET_MMC + * invoke function to reset MMC counter and data + * structure + * - OSI_CMD_SAVE_REGISTER + * Take backup of MAC MMIO address space + * - OSI_CMD_MAC_LB + * Configure MAC loopback + * - OSI_CMD_FLOW_CTRL + * Configure flow control settings + * arg1_u32 - Enable or disable flow control settings + * - OSI_CMD_SET_MODE + * Set Full/Half Duplex mode. + * arg1_u32 - mode + * - OSI_CMD_SET_SPEED + * Set Operating speed + * arg1_u32 - Operating speed + * - OSI_CMD_L2_FILTER + * configure L2 mac filter + * l2_filter_struct - OSI filter structure + * - OSI_CMD_RXCSUM_OFFLOAD + * Configure RX checksum offload in MAC + * arg1_u32 - enable(1)/disable(0) + * - OSI_CMD_ADJ_FREQ + * Adjust frequency + * arg6_u32 - Parts per Billion + * - OSI_CMD_ADJ_TIME + * Adjust MAC time with system time + * arg1_u32 - Delta time in nano seconds + * - OSI_CMD_CONFIG_PTP + * Configure PTP + * arg1_u32 - Enable(1) or disable(0) Time Stamping + * - OSI_CMD_GET_AVB + * Get CBS algo and parameters + * avb_struct - osi core avb data structure + * - OSI_CMD_SET_AVB + * Set CBS algo and parameters + * avb_struct - osi core avb data structure + * - OSI_CMD_CONFIG_RX_CRC_CHECK + * Configure CRC Checking for Received Packets + * arg1_u32 - Enable or disable checking of CRC field in + * received pkts + * - OSI_CMD_UPDATE_VLAN_ID + * invoke osi call to update VLAN ID + * arg1_u32 - VLAN ID + * - OSI_CMD_CONFIG_TXSTATUS + * Configure Tx packet status reporting + * Enable(1) or 
disable(0) tx packet status reporting + * - OSI_CMD_GET_HW_FEAT + * Reading MAC HW features + * hw_feat_struct - holds the supported features of the hardware + * - OSI_CMD_CONFIG_FW_ERR + * Configure forwarding of error packets + * arg1_u32 - queue index, Max OSI_EQOS_MAX_NUM_QUEUES + * arg2_u32 - FWD error enable(1)/disable(0) + * - OSI_CMD_ARP_OFFLOAD + * Configure ARP offload in MAC + * arg1_u32 - Enable/disable flag + * arg7_u8_p - Char array representation of IP address + * - OSI_CMD_VLAN_FILTER + * OSI call for configuring VLAN filter + * vlan_filter - vlan filter structure + * - OSI_CMD_CONFIG_EEE + * Configure EEE LPI in MAC + * arg1_u32 - Enable (1)/disable (0) tx lpi + * arg2_u32 - Tx LPI entry timer in usecs upto + * OSI_MAX_TX_LPI_TIMER (in steps of 8usec) + * - OSI_CMD_L3L4_FILTER + * invoke OSI call to add L3/L4 + * l3l4_filter - l3_l4 filter structure + * arg1_u32 - L3 filter (ipv4(0) or ipv6(1)) + * or L4 filter (tcp(0) or udp(1) + * arg2_u32 - filter based dma routing enable(1) + * arg3_u32 - dma channel for routing based on filter. + * Max OSI_EQOS_MAX_NUM_CHANS. 
+ * arg4_u32 - API call for L3 filter(0) or L4 filter(1) + * - OSI_CMD_SET_SYSTOHW_TIME + * set system to MAC hardware + * arg1_u32 - sec + * arg2_u32 - nsec + * - OSI_CMD_CONFIG_PTP_OFFLOAD + * enable/disable PTP offload feature + * pto_config - ptp offload structure + * - OSI_CMD_PTP_RXQ_ROUTE + * rxq routing to specific queue + * rxq_route - rxq routing information in structure + * - OSI_CMD_CONFIG_FRP + * Issue FRP command to HW + * frp_cmd - FRP command parameter + * - OSI_CMD_CONFIG_RSS + * Configure RSS + * - OSI_CMD_CONFIG_EST + * Configure EST registers and GCL to hw + * est - EST configuration structure + * - OSI_CMD_CONFIG_FPE + * Configure FPE register and preemptable queue + * fpe - FPE configuration structure + * + * - OSI_CMD_GET_TX_TS + * Command to get TX timestamp for PTP packet + * ts - OSI core timestamp structure + * + * - OSI_CMD_FREE_TS + * Command to free old timestamp for PTP packet + * chan - DMA channel number +1. 0 will be used for onestep + * + * - OSI_CMD_CAP_TSC_PTP + * Capture TSC and PTP time stamp + * ptp_tsc_data - output structure with time + * + * - OSI_CMD_CONF_M2M_TS + * Enable/Disable MAC to MAC time sync for Secondary interface + * enable_disable - 1 - enable, 0- disable + * + * @param[in] osi_core: OSI core private data structure. + * @param[in] data: void pointer pointing to osi_ioctl + * + * @pre MAC should be init and started. see osi_start_mac() + * + * @note + * Traceability Details: + * + * @usage + * - Allowed context for the API call + * - Interrupt handler: No + * - Signal handler: No + * - Thread safe: No + * - Async/Sync: Sync + * - Required Privileges: None + * - API Group: + * - Initialization: No + * - Run time: Yes + * - De-initialization: No + * + * @retval 0 on success + * @retval -1 on failure. 
+ */ +static nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, + struct osi_ioctl *data) { - struct core_local *l_core = (struct core_local *)osi_core; - struct core_ops *ops_p; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + const struct core_ops *ops_p; nve32_t ret = -1; -#if DRIFT_CAL struct osi_core_priv_data *sec_osi_core; struct core_local *secondary_osi_lcore; - struct core_ops *secondary_ops_p; - nvel64_t drift_value = 0x0; nveu32_t sec = 0x0; nveu32_t nsec = 0x0; - nveu32_t secondary_sec = 0x0; - nveu32_t secondary_nsec = 0x0; + nvel64_t drift_value = 0x0; nve32_t freq_adj_value = 0x0; - nvel64_t secondary_time; -#endif - - if (validate_args(osi_core, l_core) < 0) { - return ret; - } + nvel64_t secondary_time = 0x0; + nvel64_t primary_time = 0x0; ops_p = l_core->ops_p; - if (data == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "CORE: Invalid argument\n", 0ULL); - return -1; - } - switch (data->cmd) { -#ifndef OSI_STRIPPED_LIB - case OSI_CMD_RESTORE_REGISTER: - ret = ops_p->restore_registers(osi_core); - break; - case OSI_CMD_L3L4_FILTER: - ret = osi_l3l4_filter(osi_core, data->l3l4_filter, - data->arg1_u32, data->arg2_u32, - data->arg3_u32, data->arg4_u32); + ret = configure_l3l4_filter(osi_core, &data->l3l4_filter); + if (ret == 0) { + l_core->cfg.flags |= DYNAMIC_CFG_L3_L4; + } break; +#ifndef OSI_STRIPPED_LIB case OSI_CMD_MDC_CONFIG: ops_p->set_mdc_clk_rate(osi_core, data->arg5_u64); ret = 0; break; - case OSI_CMD_VALIDATE_CORE_REG: - ret = validate_core_regs(osi_core); - break; - case OSI_CMD_RESET_MMC: ops_p->reset_mmc(osi_core); ret = 0; break; - case OSI_CMD_SAVE_REGISTER: - ret = ops_p->save_registers(osi_core); - break; - case OSI_CMD_MAC_LB: ret = conf_mac_loopback(osi_core, data->arg1_u32); break; case OSI_CMD_FLOW_CTRL: ret = ops_p->config_flow_control(osi_core, data->arg1_u32); - break; - - case OSI_CMD_GET_AVB: - ret = ops_p->get_avb_algorithm(osi_core, &data->avb); - break; + if (ret == 
0) { + l_core->cfg.flow_ctrl = data->arg1_u32; + l_core->cfg.flags |= DYNAMIC_CFG_FC; + } - case OSI_CMD_SET_AVB: - ret = ops_p->set_avb_algorithm(osi_core, &data->avb); break; case OSI_CMD_CONFIG_RX_CRC_CHECK: @@ -1713,16 +2475,24 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, case OSI_CMD_UPDATE_VLAN_ID: ret = vlan_id_update(osi_core, data->arg1_u32); + if (ret == 0) { + if ((data->arg1_u32 & VLAN_ACTION_MASK) == OSI_VLAN_ACTION_ADD) { + l_core->cfg.vlan[data->arg1_u32 & VLAN_VID_MASK].vid = + data->arg1_u32 & VLAN_VID_MASK; + l_core->cfg.vlan[data->arg1_u32 & VLAN_VID_MASK].used = OSI_ENABLE; + } else { + l_core->cfg.vlan[data->arg1_u32 & VLAN_VID_MASK].used = OSI_DISABLE; + } + + l_core->cfg.flags |= DYNAMIC_CFG_VLAN; + } + break; case OSI_CMD_CONFIG_TXSTATUS: ret = ops_p->config_tx_status(osi_core, data->arg1_u32); break; - case OSI_CMD_CONFIG_FW_ERR: - ret = ops_p->config_fw_err_pkts(osi_core, data->arg1_u32, - data->arg2_u32); - break; case OSI_CMD_ARP_OFFLOAD: ret = conf_arp_offload(osi_core, data->arg1_u32, @@ -1738,21 +2508,49 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, case OSI_CMD_CONFIG_EEE: ret = conf_eee(osi_core, data->arg1_u32, data->arg2_u32); + if (ret == 0) { + l_core->cfg.tx_lpi_enabled = data->arg1_u32; + l_core->cfg.tx_lpi_timer = data->arg2_u32; + l_core->cfg.flags |= DYNAMIC_CFG_EEE; + } + + break; + case OSI_CMD_CONFIG_FW_ERR: + ret = hw_config_fw_err_pkts(osi_core, data->arg1_u32, data->arg2_u32); break; -#endif /* !OSI_STRIPPED_LIB */ case OSI_CMD_POLL_FOR_MAC_RST: - ret = ops_p->poll_for_swr(osi_core); + ret = hw_poll_for_swr(osi_core); break; - case OSI_CMD_START_MAC: - ops_p->start_mac(osi_core); - ret = 0; + case OSI_CMD_GET_MAC_VER: + ret = osi_get_mac_version(osi_core, &data->arg1_u32); break; - case OSI_CMD_STOP_MAC: - ops_p->stop_mac(osi_core); - ret = 0; + case OSI_CMD_SET_MODE: + ret = hw_set_mode(osi_core, data->arg6_32); + break; +#endif /* !OSI_STRIPPED_LIB */ + + case 
OSI_CMD_GET_AVB: + ret = ops_p->get_avb_algorithm(osi_core, &data->avb); + break; + + case OSI_CMD_SET_AVB: + if (data->avb.algo == OSI_MTL_TXQ_AVALG_CBS) { + ret = hw_validate_avb_input(osi_core, &data->avb); + if (ret != 0) { + break; + } + } + + ret = ops_p->set_avb_algorithm(osi_core, &data->avb); + if (ret == 0) { + (void)osi_memcpy(&l_core->cfg.avb[data->avb.qindex].avb_info, + &data->avb, sizeof(struct osi_core_avb_algorithm)); + l_core->cfg.avb[data->avb.qindex].used = OSI_ENABLE; + l_core->cfg.flags |= DYNAMIC_CFG_AVB; + } break; case OSI_CMD_COMMON_ISR: @@ -1769,31 +2567,32 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, ret = 0; break; - case OSI_CMD_GET_MAC_VER: - ret = osi_get_mac_version(osi_core, &data->arg1_u32); - break; - - case OSI_CMD_SET_MODE: - ret = ops_p->set_mode(osi_core, data->arg6_32); - break; - case OSI_CMD_SET_SPEED: - ret = ops_p->set_speed(osi_core, data->arg6_32); + ret = hw_set_speed(osi_core, data->arg6_32); break; case OSI_CMD_L2_FILTER: ret = osi_l2_filter(osi_core, &data->l2_filter); + if (ret == 0) { + store_l2_filter(osi_core, &data->l2_filter); + l_core->cfg.flags |= DYNAMIC_CFG_L2; + } + break; case OSI_CMD_RXCSUM_OFFLOAD: - ret = ops_p->config_rxcsum_offload(osi_core, data->arg1_u32); + ret = hw_config_rxcsum_offload(osi_core, data->arg1_u32); + if (ret == 0) { + l_core->cfg.rxcsum = data->arg1_u32; + l_core->cfg.flags |= DYNAMIC_CFG_RXCSUM; + } + break; case OSI_CMD_ADJ_FREQ: ret = osi_adjust_freq(osi_core, data->arg6_32); -#if DRIFT_CAL if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: adjust freq failed\n", 0ULL); break; } @@ -1804,7 +2603,7 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, } sec_osi_core = get_role_pointer(OSI_PTP_M2M_SECONDARY); - secondary_osi_lcore = (struct core_local *)sec_osi_core; + secondary_osi_lcore = (struct core_local *)(void *)sec_osi_core; if ((validate_args(sec_osi_core, 
secondary_osi_lcore) < 0) || (secondary_osi_lcore->hw_init_successful != OSI_ENABLE) || (secondary_osi_lcore->m2m_tsync != OSI_ENABLE)) { @@ -1812,16 +2611,10 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, } if (l_core->ether_m2m_role == OSI_PTP_M2M_PRIMARY) { - drift_value = 0x0; - osi_lock_irq_enabled(&secondary_osi_lcore->serv.m2m_lock); - read_sec_ns(sec_osi_core->base, - sec_osi_core->mac, &secondary_sec, &secondary_nsec); - read_sec_ns(osi_core->base, - osi_core->mac, &sec, &nsec); - osi_unlock_irq_enabled(&secondary_osi_lcore->serv.m2m_lock); + drift_value = dirft_calculation(osi_core, sec_osi_core, + &primary_time, + &secondary_time); - drift_value = dirft_calculation(sec, nsec, secondary_sec, secondary_nsec); - secondary_time = (secondary_sec * 1000000000LL) + secondary_nsec; secondary_osi_lcore->serv.const_i = I_COMPONENT_BY_10; secondary_osi_lcore->serv.const_p = P_COMPONENT_BY_10; freq_adj_value = freq_offset_calculate(sec_osi_core, @@ -1831,6 +2624,13 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, /* call adjust time as JUMP happened */ ret = osi_adjust_time(sec_osi_core, drift_value); + if (ret < 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, + "CORE: adjust_time failed\n", + 0ULL); + } else { + ret = osi_adjust_freq(sec_osi_core, 0); + } } else { ret = osi_adjust_freq(sec_osi_core, freq_adj_value); @@ -1838,19 +2638,19 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, } if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: adjust_freq for sec_controller failed\n", 0ULL); ret = 0; } -#endif + break; case OSI_CMD_ADJ_TIME: ret = osi_adjust_time(osi_core, data->arg8_64); -#if DRIFT_CAL + if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: adjust_time failed\n", 0ULL); break; } @@ -1861,7 +2661,7 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data 
*osi_core, } sec_osi_core = get_role_pointer(OSI_PTP_M2M_SECONDARY); - secondary_osi_lcore = (struct core_local *)sec_osi_core; + secondary_osi_lcore = (struct core_local *)(void *)sec_osi_core; if ((validate_args(sec_osi_core, secondary_osi_lcore) < 0) || (secondary_osi_lcore->hw_init_successful != OSI_ENABLE) || (secondary_osi_lcore->m2m_tsync != OSI_ENABLE)) { @@ -1870,37 +2670,36 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, if (l_core->ether_m2m_role == OSI_PTP_M2M_PRIMARY) { drift_value = 0x0; - osi_lock_irq_enabled(&secondary_osi_lcore->serv.m2m_lock); - read_sec_ns(sec_osi_core->base, - sec_osi_core->mac, &secondary_sec, &secondary_nsec); - read_sec_ns(osi_core->base, - osi_core->mac, &sec, &nsec); - osi_unlock_irq_enabled(&secondary_osi_lcore->serv.m2m_lock); - drift_value = dirft_calculation(sec, nsec, - secondary_sec, - secondary_nsec); + drift_value = dirft_calculation(osi_core, sec_osi_core, + &primary_time, + &secondary_time); ret = osi_adjust_time(sec_osi_core, drift_value); if (ret == 0) { secondary_osi_lcore->serv.count = SERVO_STATS_0; secondary_osi_lcore->serv.drift = 0; secondary_osi_lcore->serv.last_ppb = 0; + ret = osi_adjust_freq(sec_osi_core, 0); } } if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: adjust_time for sec_controller failed\n", 0ULL); ret = 0; } -#endif + break; case OSI_CMD_CONFIG_PTP: ret = osi_ptp_configuration(osi_core, data->arg1_u32); -#if DRIFT_CAL + if (ret == 0) { + l_core->cfg.ptp = data->arg1_u32; + l_core->cfg.flags |= DYNAMIC_CFG_PTP; + } + if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: configure_ptp failed\n", 0ULL); break; } @@ -1911,7 +2710,7 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, } sec_osi_core = get_role_pointer(OSI_PTP_M2M_SECONDARY); - secondary_osi_lcore = (struct core_local *)sec_osi_core; + secondary_osi_lcore = 
(struct core_local *)(void *)sec_osi_core; if ((validate_args(sec_osi_core, secondary_osi_lcore) < 0) || (secondary_osi_lcore->hw_init_successful != OSI_ENABLE) || (secondary_osi_lcore->m2m_tsync != OSI_ENABLE)) { @@ -1924,7 +2723,7 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, secondary_osi_lcore->serv.drift = 0; secondary_osi_lcore->serv.last_ppb = 0; } -#endif + break; case OSI_CMD_GET_HW_FEAT: @@ -1932,11 +2731,10 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, break; case OSI_CMD_SET_SYSTOHW_TIME: - ret = ops_p->set_systime_to_mac(osi_core, data->arg1_u32, - data->arg2_u32); -#if DRIFT_CAL + ret = hw_set_systime_to_mac(osi_core, data->arg1_u32, data->arg2_u32); + if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: set systohw time failed\n", 0ULL); break; } @@ -1947,7 +2745,7 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, } sec_osi_core = get_role_pointer(OSI_PTP_M2M_SECONDARY); - secondary_osi_lcore = (struct core_local *)sec_osi_core; + secondary_osi_lcore = (struct core_local *)(void *)sec_osi_core; if ((validate_args(sec_osi_core, secondary_osi_lcore) < 0) || (secondary_osi_lcore->hw_init_successful != OSI_ENABLE) || (secondary_osi_lcore->m2m_tsync != OSI_ENABLE)) { @@ -1955,28 +2753,27 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, } if (l_core->ether_m2m_role == OSI_PTP_M2M_PRIMARY) { - drift_value = 0x0; osi_lock_irq_enabled(&secondary_osi_lcore->serv.m2m_lock); - read_sec_ns(osi_core->base, + common_get_systime_from_mac(osi_core->base, osi_core->mac, &sec, &nsec); osi_unlock_irq_enabled(&secondary_osi_lcore->serv.m2m_lock); - secondary_ops_p = secondary_osi_lcore->ops_p; - ret = secondary_ops_p->set_systime_to_mac(sec_osi_core, sec, - nsec); + ret = hw_set_systime_to_mac(sec_osi_core, sec, nsec); if (ret == 0) { secondary_osi_lcore->serv.count = SERVO_STATS_0; secondary_osi_lcore->serv.drift = 0; 
secondary_osi_lcore->serv.last_ppb = 0; + ret = osi_adjust_freq(sec_osi_core, 0); } } if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: set_time for sec_controller failed\n", 0ULL); ret = 0; } -#endif + break; +#ifndef OSI_STRIPPED_LIB case OSI_CMD_CONFIG_PTP_OFFLOAD: ret = conf_ptp_offload(osi_core, &data->pto_config); break; @@ -1985,20 +2782,34 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, ret = rxq_route_config(osi_core, &data->rxq_route); break; - case OSI_CMD_CONFIG_FRP: - ret = configure_frp(osi_core, &data->frp_cmd); - break; - case OSI_CMD_CONFIG_RSS: ret = ops_p->config_rss(osi_core); break; +#endif /* !OSI_STRIPPED_LIB */ + case OSI_CMD_CONFIG_FRP: + ret = configure_frp(osi_core, &data->frp_cmd); + l_core->cfg.flags |= DYNAMIC_CFG_FRP; + break; + case OSI_CMD_CONFIG_EST: ret = config_est(osi_core, &data->est); + if (ret == 0) { + (void)osi_memcpy(&l_core->cfg.est, &data->est, + sizeof(struct osi_est_config)); + l_core->cfg.flags |= DYNAMIC_CFG_EST; + } + break; case OSI_CMD_CONFIG_FPE: ret = config_fpe(osi_core, &data->fpe); + if (ret == 0) { + (void)osi_memcpy(&l_core->cfg.fpe, &data->fpe, + sizeof(struct osi_fpe_config)); + l_core->cfg.flags |= DYNAMIC_CFG_FPE; + } + break; case OSI_CMD_READ_REG: @@ -2030,6 +2841,12 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, case OSI_CMD_MAC_MTU: ret = 0; +#ifdef MACSEC_SUPPORT + if ((osi_core->macsec_ops != OSI_NULL) && + (osi_core->macsec_ops->update_mtu != OSI_NULL)) { + ret = osi_core->macsec_ops->update_mtu(osi_core, data->arg1_u32); + } +#endif /* MACSEC_SUPPORT */ break; #ifdef OSI_DEBUG @@ -2043,7 +2860,7 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, break; #endif /* OSI_DEBUG */ case OSI_CMD_CAP_TSC_PTP: - ret = ops_p->ptp_tsc_capture(osi_core, &data->ptp_tsc); + ret = hw_ptp_tsc_capture(osi_core, &data->ptp_tsc); break; case OSI_CMD_CONF_M2M_TS: @@ -2056,9 +2873,33 @@ 
nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, case OSI_CMD_HSI_CONFIGURE: ret = ops_p->core_hsi_configure(osi_core, data->arg1_u32); break; + case OSI_CMD_HSI_INJECT_ERR: + ret = ops_p->core_hsi_inject_err(osi_core, data->arg1_u32); + break; +#endif + +#ifdef OSI_DEBUG + case OSI_CMD_DEBUG_INTR_CONFIG: +#ifdef DEBUG_MACSEC + osi_core->macsec_ops->intr_config(osi_core, data->arg1_u32); #endif + ret = 0; + break; +#endif + case OSI_CMD_SUSPEND: + l_core->state = OSI_SUSPENDED; + ret = osi_hal_hw_core_deinit(osi_core); + break; + case OSI_CMD_RESUME: + ret = osi_hal_hw_core_init(osi_core); + if (ret < 0) { + break; + } + + apply_dynamic_cfg(osi_core); + break; default: - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "CORE: Incorrect command\n", (nveul64_t)data->cmd); break; @@ -2067,24 +2908,6 @@ nve32_t osi_hal_handle_ioctl(struct osi_core_priv_data *osi_core, return ret; } -nve32_t osi_get_hw_features(struct osi_core_priv_data *const osi_core, - struct osi_hw_features *hw_feat) -{ - struct core_local *l_core = (struct core_local *)osi_core; - - if (validate_args(osi_core, l_core) < 0) { - return -1; - } - - if (hw_feat == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "CORE: Invalid hw_feat\n", 0ULL); - return -1; - } - - return l_core->ops_p->get_hw_features(osi_core, hw_feat); -} - void hw_interface_init_core_ops(struct if_core_ops *if_ops_p) { if_ops_p->if_core_init = osi_hal_hw_core_init; diff --git a/kernel/nvethernetrm/osi/core/vlan_filter.c b/kernel/nvethernetrm/osi/core/vlan_filter.c index 4f99be2271..9f8fdaf56f 100644 --- a/kernel/nvethernetrm/osi/core/vlan_filter.c +++ b/kernel/nvethernetrm/osi/core/vlan_filter.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,6 +20,7 @@ * DEALINGS IN THE SOFTWARE. */ +#ifndef OSI_STRIPPED_LIB #include "../osi/common/common.h" #include "vlan_filter.h" @@ -35,11 +36,11 @@ * @return Index from VID array if match found. * @return Return VLAN_HW_FILTER_FULL_IDX if not found. */ -static inline unsigned int get_vlan_filter_idx( +static inline nveu32_t get_vlan_filter_idx( struct osi_core_priv_data *osi_core, - unsigned short vlan_id) + nveu16_t vlan_id) { - unsigned int vid_idx = VLAN_HW_FILTER_FULL_IDX; + nveu32_t vid_idx = VLAN_HW_FILTER_FULL_IDX; unsigned long bitmap = osi_core->vf_bitmap; unsigned long temp = 0U; @@ -48,7 +49,7 @@ static inline unsigned int get_vlan_filter_idx( if (osi_core->vid[temp] == vlan_id) { /* vlan ID match found */ - vid_idx = (unsigned int)temp; + vid_idx = (nveu32_t)temp; break; } @@ -70,11 +71,11 @@ static inline unsigned int get_vlan_filter_idx( * * @return 0 on success */ -static inline int allow_all_vid_tags(unsigned char *base, - unsigned int pass_all_vids) +static inline nve32_t allow_all_vid_tags(nveu8_t *base, + nveu32_t pass_all_vids) { - unsigned int vlan_tag_reg = 0; - unsigned int hash_filter_reg = 0; + nveu32_t vlan_tag_reg = 0; + nveu32_t hash_filter_reg = 0; vlan_tag_reg = osi_readl(base + MAC_VLAN_TAG_CTRL); hash_filter_reg = osi_readl(base + MAC_VLAN_HASH_FILTER); @@ -84,7 +85,7 @@ static inline int allow_all_vid_tags(unsigned char *base, hash_filter_reg |= VLAN_HASH_ALLOW_ALL; } else { vlan_tag_reg &= ~MAC_VLAN_TAG_CTRL_VHTM; - hash_filter_reg &= (unsigned int) ~VLAN_HASH_ALLOW_ALL; + hash_filter_reg &= (nveu32_t) ~VLAN_HASH_ALLOW_ALL; } osi_writel(vlan_tag_reg, base + MAC_VLAN_TAG_CTRL); @@ -107,11 +108,11 @@ static inline int allow_all_vid_tags(unsigned char *base, * @return 0 on Success. 
* @return negative value on failure */ -static inline int is_vlan_id_enqueued(struct osi_core_priv_data *osi_core, - unsigned short vlan_id, - unsigned int *idx) +static inline nve32_t is_vlan_id_enqueued(struct osi_core_priv_data *osi_core, + nveu16_t vlan_id, + nveu32_t *idx) { - unsigned int i = 0; + nveu32_t i = 0; if (osi_core->vlan_filter_cnt == VLAN_HW_FILTER_FULL_IDX) { /* No elements in SW queue to search */ @@ -140,11 +141,11 @@ static inline int is_vlan_id_enqueued(struct osi_core_priv_data *osi_core, * @return 0 on success. * @return negative value on failure. */ -static inline int enqueue_vlan_id(struct osi_core_priv_data *osi_core, - unsigned short vlan_id) +static inline nve32_t enqueue_vlan_id(struct osi_core_priv_data *osi_core, + nveu16_t vlan_id) { - int ret = 0; - unsigned int idx; + nve32_t ret = 0; + nveu32_t idx; if (osi_core->vlan_filter_cnt == VLAN_NUM_VID) { /* Entire SW queue full */ @@ -154,7 +155,7 @@ static inline int enqueue_vlan_id(struct osi_core_priv_data *osi_core, /* Check if requested vlan_id alredy queued */ ret = is_vlan_id_enqueued(osi_core, vlan_id, &idx); if (ret == 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "VLAN ID already programmed\n", 0ULL); return -1; @@ -177,25 +178,25 @@ static inline int enqueue_vlan_id(struct osi_core_priv_data *osi_core, * @return 0 on success. * @return -1 on failure. 
*/ -static inline int poll_for_vlan_filter_reg_rw( +static inline nve32_t poll_for_vlan_filter_reg_rw( struct osi_core_priv_data *osi_core) { - unsigned int retry = 10; - unsigned int count; - unsigned int val = 0; - int cond = 1; + nveu32_t retry = 10; + nveu32_t count; + nveu32_t val = 0; + nve32_t cond = 1; count = 0; while (cond == 1) { if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "VLAN filter update timedout\n", 0ULL); return -1; } count++; - val = osi_readl((unsigned char *)osi_core->base + + val = osi_readl((nveu8_t *)osi_core->base + MAC_VLAN_TAG_CTRL); if ((val & MAC_VLAN_TAG_CTRL_OB) == OSI_NONE) { /* Set cond to 0 to exit loop */ @@ -222,17 +223,17 @@ static inline int poll_for_vlan_filter_reg_rw( * @return 0 on success * @return -1 on failure. */ -static inline int update_vlan_filters(struct osi_core_priv_data *osi_core, - unsigned int vid_idx, - unsigned int val) +static inline nve32_t update_vlan_filters(struct osi_core_priv_data *osi_core, + nveu32_t vid_idx, + nveu32_t val) { - unsigned char *base = (unsigned char *)osi_core->base; - int ret = 0; + nveu8_t *base = (nveu8_t *)osi_core->base; + nve32_t ret = 0; osi_writel(val, base + MAC_VLAN_TAG_DATA); val = osi_readl(base + MAC_VLAN_TAG_CTRL); - val &= (unsigned int) ~MAC_VLAN_TAG_CTRL_OFS_MASK; + val &= (nveu32_t) ~MAC_VLAN_TAG_CTRL_OFS_MASK; val |= vid_idx << MAC_VLAN_TAG_CTRL_OFS_SHIFT; val &= ~MAC_VLAN_TAG_CTRL_CT; val |= MAC_VLAN_TAG_CTRL_OB; @@ -240,7 +241,7 @@ static inline int update_vlan_filters(struct osi_core_priv_data *osi_core, ret = poll_for_vlan_filter_reg_rw(osi_core); if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "Failed to update VLAN filters\n", 0ULL); return -1; } @@ -259,13 +260,13 @@ static inline int update_vlan_filters(struct osi_core_priv_data *osi_core, * @return 0 on success * @return -1 on failure. 
*/ -static inline int add_vlan_id(struct osi_core_priv_data *osi_core, +static inline nve32_t add_vlan_id(struct osi_core_priv_data *osi_core, struct core_ops *ops_p, - unsigned short vlan_id) + nveu16_t vlan_id) { - unsigned int vid_idx = 0; - unsigned int val = 0; - int ret = 0; + nveu32_t vid_idx = 0; + nveu32_t val = 0; + nve32_t ret = 0; /* Check if VLAN ID already programmed */ vid_idx = get_vlan_filter_idx(osi_core, vlan_id); @@ -277,7 +278,7 @@ static inline int add_vlan_id(struct osi_core_priv_data *osi_core, } /* Get free index to add the VID */ - vid_idx = (unsigned int) __builtin_ctzl(~osi_core->vf_bitmap); + vid_idx = (nveu32_t) __builtin_ctzl(~osi_core->vf_bitmap); /* If there is no free filter index add into SW VLAN filter queue to store */ if (vid_idx == VLAN_HW_FILTER_FULL_IDX) { /* Add VLAN ID to SW queue */ @@ -299,14 +300,14 @@ static inline int add_vlan_id(struct osi_core_priv_data *osi_core, OSI_DISABLE, OSI_DISABLE); if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Failed to enable VLAN filtering\n", 0ULL); return -1; } } - val = osi_readl((unsigned char *)osi_core->base + MAC_VLAN_TAG_DATA); - val &= (unsigned int) ~VLAN_VID_MASK; + val = osi_readl((nveu8_t *)osi_core->base + MAC_VLAN_TAG_DATA); + val &= (nveu32_t) ~VLAN_VID_MASK; val |= (vlan_id | MAC_VLAN_TAG_DATA_ETV | MAC_VLAN_TAG_DATA_VEN); return update_vlan_filters(osi_core, vid_idx, val); @@ -325,10 +326,10 @@ static inline int add_vlan_id(struct osi_core_priv_data *osi_core, * @return 0 on success * @return -1 on failure. 
*/ -static inline int dequeue_vlan_id(struct osi_core_priv_data *osi_core, - unsigned int idx) +static inline nve32_t dequeue_vlan_id(struct osi_core_priv_data *osi_core, + nveu32_t idx) { - unsigned int i; + nveu32_t i; if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) { return -1; @@ -336,14 +337,14 @@ static inline int dequeue_vlan_id(struct osi_core_priv_data *osi_core, /* Left shift the array elements by one for the VID order */ for (i = idx; i <= osi_core->vlan_filter_cnt; i++) { - osi_core->vid[i] = osi_core->vid[i + 1]; + osi_core->vid[i] = osi_core->vid[i + 1U]; } osi_core->vid[i] = VLAN_ID_INVALID; osi_core->vlan_filter_cnt--; if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) { - allow_all_vid_tags(osi_core->base, OSI_DISABLE); + return allow_all_vid_tags(osi_core->base, OSI_DISABLE); } return 0; @@ -363,14 +364,14 @@ static inline int dequeue_vlan_id(struct osi_core_priv_data *osi_core, * @return 0 on success * @return -1 on failure. */ -static inline int dequeue_vid_to_add_filter_reg( +static inline nve32_t dequeue_vid_to_add_filter_reg( struct osi_core_priv_data *osi_core, - unsigned int vid_idx) + nveu32_t vid_idx) { - unsigned int val = 0; - unsigned short vlan_id = 0; - unsigned int i = 0; - int ret = 0; + nveu32_t val = 0; + nveu16_t vlan_id = 0; + nveu32_t i = 0; + nve32_t ret = 0; vlan_id = osi_core->vid[VLAN_HW_FILTER_FULL_IDX]; if (vlan_id == VLAN_ID_INVALID) { @@ -380,8 +381,8 @@ static inline int dequeue_vid_to_add_filter_reg( osi_core->vf_bitmap |= OSI_BIT(vid_idx); osi_core->vid[vid_idx] = vlan_id; - val = osi_readl((unsigned char *)osi_core->base + MAC_VLAN_TAG_DATA); - val &= (unsigned int) ~VLAN_VID_MASK; + val = osi_readl((nveu8_t *)osi_core->base + MAC_VLAN_TAG_DATA); + val &= (nveu32_t) ~VLAN_VID_MASK; val |= (vlan_id | MAC_VLAN_TAG_DATA_ETV | MAC_VLAN_TAG_DATA_VEN); ret = update_vlan_filters(osi_core, vid_idx, val); @@ -390,7 +391,7 @@ static inline int dequeue_vid_to_add_filter_reg( } for (i = VLAN_HW_FILTER_FULL_IDX; i <= 
osi_core->vlan_filter_cnt; i++) { - osi_core->vid[i] = osi_core->vid[i + 1]; + osi_core->vid[i] = osi_core->vid[i + 1U]; } osi_core->vid[i] = VLAN_ID_INVALID; @@ -409,14 +410,14 @@ static inline int dequeue_vid_to_add_filter_reg( * @return 0 on success * @return -1 on failure. */ -static inline int del_vlan_id(struct osi_core_priv_data *osi_core, +static inline nve32_t del_vlan_id(struct osi_core_priv_data *osi_core, struct core_ops *ops_p, - unsigned short vlan_id) + nveu16_t vlan_id) { - unsigned int vid_idx = 0; - unsigned int val = 0; - unsigned int idx; - int ret = 0; + nveu32_t vid_idx = 0; + nveu32_t val = 0; + nveu32_t idx; + nve32_t ret = 0; /* Search for vlan filter index to be deleted */ vid_idx = get_vlan_filter_idx(osi_core, vlan_id); @@ -445,26 +446,29 @@ static inline int del_vlan_id(struct osi_core_priv_data *osi_core, OSI_DISABLE, OSI_DISABLE); if (ret < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_INVALID, "Failed to disable VLAN filtering\n", 0ULL); return -1; } } if (osi_core->vlan_filter_cnt == VLAN_HW_MAX_NRVF) { - allow_all_vid_tags(osi_core->base, OSI_DISABLE); + ret = allow_all_vid_tags(osi_core->base, OSI_DISABLE); + if (ret < 0) { + return -1; + } } /* if SW queue is not empty dequeue from SW queue and update filter */ return dequeue_vid_to_add_filter_reg(osi_core, vid_idx); } -int update_vlan_id(struct osi_core_priv_data *osi_core, +nve32_t update_vlan_id(struct osi_core_priv_data *osi_core, struct core_ops *ops_p, - unsigned int vid) + nveu32_t vid) { - unsigned int action = vid & VLAN_ACTION_MASK; - unsigned short vlan_id = vid & VLAN_VID_MASK; + nveu32_t action = vid & VLAN_ACTION_MASK; + nveu16_t vlan_id = (nveu16_t)(vid & VLAN_VID_MASK); if (action == OSI_VLAN_ACTION_ADD) { return add_vlan_id(osi_core, ops_p, vlan_id); @@ -472,3 +476,4 @@ int update_vlan_id(struct osi_core_priv_data *osi_core, return del_vlan_id(osi_core, ops_p, vlan_id); } +#endif /* !OSI_STRIPPED_LIB */ diff --git 
a/kernel/nvethernetrm/osi/core/vlan_filter.h b/kernel/nvethernetrm/osi/core/vlan_filter.h index d4406ce479..574c1f268b 100644 --- a/kernel/nvethernetrm/osi/core/vlan_filter.h +++ b/kernel/nvethernetrm/osi/core/vlan_filter.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -26,6 +26,7 @@ #include #include "core_local.h" +#ifndef OSI_STRIPPED_LIB /** * @addtogroup MAC-VLAN MAC VLAN configuration registers and bit fields * @@ -36,7 +37,7 @@ #define MAC_VLAN_TAG_CTRL 0x50 #define MAC_VLAN_TAG_DATA 0x54 #define MAC_VLAN_HASH_FILTER 0x58 -#define MAC_VLAN_TAG_CTRL_OFS_MASK 0x7C +#define MAC_VLAN_TAG_CTRL_OFS_MASK 0x7CU #define MAC_VLAN_TAG_CTRL_OFS_SHIFT 2U #define MAC_VLAN_TAG_CTRL_CT OSI_BIT(1) #define MAC_VLAN_TAG_CTRL_OB OSI_BIT(0) @@ -53,9 +54,9 @@ */ #define VLAN_HW_MAX_NRVF 32U #define VLAN_HW_FILTER_FULL_IDX VLAN_HW_MAX_NRVF -#define VLAN_VID_MASK 0xFFFF -#define VLAN_ID_INVALID 0xFFFF -#define VLAN_HASH_ALLOW_ALL 0xFFFF +#define VLAN_VID_MASK 0xFFFFU +#define VLAN_ID_INVALID 0xFFFFU +#define VLAN_HASH_ALLOW_ALL 0xFFFFU #define VLAN_ACTION_MASK OSI_BIT(31) /** @} */ @@ -70,7 +71,7 @@ * @return 0 on success * @return -1 on failure. */ -int update_vlan_id(struct osi_core_priv_data *osi_core, - struct core_ops *ops_p, - unsigned int vid); +nve32_t update_vlan_id(struct osi_core_priv_data *osi_core, + struct core_ops *ops_p, nveu32_t vid); +#endif /* !OSI_STRIPPED_LIB */ #endif /* VLAN_FILTER_H */ diff --git a/kernel/nvethernetrm/osi/core/xpcs.c b/kernel/nvethernetrm/osi/core/xpcs.c index 6ba0d31252..4b070fa735 100644 --- a/kernel/nvethernetrm/osi/core/xpcs.c +++ b/kernel/nvethernetrm/osi/core/xpcs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. 
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -21,6 +21,7 @@ */ #include "xpcs.h" +#include "core_local.h" /** * @brief xpcs_poll_for_an_complete - Polling for AN complete. @@ -34,22 +35,22 @@ * @retval 0 on success * @retval -1 on failure. */ -static inline int xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core, - unsigned int *an_status) +static inline nve32_t xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core, + nveu32_t *an_status) { void *xpcs_base = osi_core->xpcs_base; - unsigned int status = 0; - unsigned int retry = 1000; - unsigned int count; - int cond = 1; - int ret = 0; + nveu32_t status = 0; + nveu32_t retry = 1000; + nveu32_t count; + nve32_t cond = 1; + nve32_t ret = 0; /* 14. Poll for AN complete */ cond = 1; count = 0; while (cond == 1) { if (count > retry) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "XPCS AN completion timed out\n", 0ULL); #ifdef HSI_SUPPORT if (osi_core->hsi.enabled == OSI_ENABLE) { @@ -59,7 +60,8 @@ static inline int xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core, osi_core->hsi.report_count_err[AUTONEG_ERR_IDX] = OSI_ENABLE; } #endif - return -1; + ret = -1; + goto fail; } count++; @@ -73,20 +75,22 @@ static inline int xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core, status &= ~XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR; ret = xpcs_write_safety(osi_core, XPCS_VR_MII_AN_INTR_STS, status); if (ret != 0) { - return ret; + goto fail; } cond = 0; } } if ((status & XPCS_USXG_AN_STS_SPEED_MASK) == 0U) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "XPCS AN completed with zero speed\n", 0ULL); - return -1; + ret = -1; + goto fail; } *an_status = status; - return 0; +fail: + return ret; } /** @@ -100,11 
+104,11 @@ static inline int xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure */ -static inline int xpcs_set_speed(struct osi_core_priv_data *osi_core, - unsigned int status) +static inline nve32_t xpcs_set_speed(struct osi_core_priv_data *osi_core, + nveu32_t status) { - unsigned int speed = status & XPCS_USXG_AN_STS_SPEED_MASK; - unsigned int ctrl = 0; + nveu32_t speed = status & XPCS_USXG_AN_STS_SPEED_MASK; + nveu32_t ctrl = 0; void *xpcs_base = osi_core->xpcs_base; ctrl = xpcs_read(xpcs_base, XPCS_SR_MII_CTRL); @@ -141,21 +145,21 @@ static inline int xpcs_set_speed(struct osi_core_priv_data *osi_core, * @retval 0 on success * @retval -1 on failure. */ -int xpcs_start(struct osi_core_priv_data *osi_core) +nve32_t xpcs_start(struct osi_core_priv_data *osi_core) { void *xpcs_base = osi_core->xpcs_base; - unsigned int an_status = 0; - unsigned int retry = RETRY_COUNT; - unsigned int count = 0; - unsigned int ctrl = 0; - int ret = 0; - int cond = COND_NOT_MET; + nveu32_t an_status = 0; + nveu32_t retry = RETRY_COUNT; + nveu32_t count = 0; + nveu32_t ctrl = 0; + nve32_t ret = 0; + nve32_t cond = COND_NOT_MET; if (osi_core->xpcs_base == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "XPCS base is NULL", 0ULL); - /* TODO: Remove this once silicon arrives */ - return 0; + ret = -1; + goto fail; } if ((osi_core->phy_iface_mode == OSI_USXGMII_MODE_10G) || @@ -164,16 +168,16 @@ int xpcs_start(struct osi_core_priv_data *osi_core) ctrl |= XPCS_SR_MII_CTRL_AN_ENABLE; ret = xpcs_write_safety(osi_core, XPCS_SR_MII_CTRL, ctrl); if (ret != 0) { - return ret; + goto fail; } ret = xpcs_poll_for_an_complete(osi_core, &an_status); if (ret < 0) { - return ret; + goto fail; } ret = xpcs_set_speed(osi_core, an_status); if (ret != 0) { - return ret; + goto fail; } /* USXGMII Rate Adaptor Reset before data transfer */ ctrl = xpcs_read(xpcs_base, 
XPCS_VR_XS_PCS_DIG_CTRL1); @@ -181,7 +185,8 @@ int xpcs_start(struct osi_core_priv_data *osi_core) xpcs_write(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1, ctrl); while (cond == COND_NOT_MET) { if (count > retry) { - return -1; + ret = -1; + goto fail; } count++; @@ -200,7 +205,8 @@ int xpcs_start(struct osi_core_priv_data *osi_core) count = 0; while (cond == COND_NOT_MET) { if (count > retry) { - return -1; + ret = -1; + break; } count++; @@ -210,11 +216,16 @@ int xpcs_start(struct osi_core_priv_data *osi_core) XPCS_SR_XS_PCS_STS1_RLU) { cond = COND_MET; } else { - osi_core->osd_ops.udelay(1000U); + /* Maximum wait delay as per HW team is 1msec. + * So add a loop for 1000 iterations with 1usec delay, + * so that if check get satisfies before 1msec will come + * out of loop and it can save some boot time + */ + osi_core->osd_ops.udelay(1U); } } - - return 0; +fail: + return ret; } /** @@ -230,46 +241,50 @@ int xpcs_start(struct osi_core_priv_data *osi_core) * @retval -1 on failure. */ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core, - unsigned int lane_init_en) + nveu32_t lane_init_en) { void *xpcs_base = osi_core->xpcs_base; - nveu32_t retry = XPCS_RETRY_COUNT; + nveu32_t retry = 5U; nve32_t cond = COND_NOT_MET; nveu32_t val = 0; nveu32_t count; + nve32_t ret = 0; val = osi_readla(osi_core, (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_STATUS); - if ((val & XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS) == + if ((val & XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS) != XPCS_WRAP_UPHY_STATUS_TX_P_UP_STATUS) { - /* return success if TX lane is already UP */ - return 0; - } - - val = osi_readla(osi_core, - (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL); - val |= lane_init_en; - osi_writela(osi_core, val, - (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL); + val = osi_readla(osi_core, + (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL); + val |= lane_init_en; + osi_writela(osi_core, val, + (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL); - count = 0; - while 
(cond == COND_NOT_MET) { - if (count > retry) { - return -1; - } - count++; + count = 0; + while (cond == COND_NOT_MET) { + if (count > retry) { + ret = -1; + goto fail; + } + count++; - val = osi_readla(osi_core, - (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL); - if ((val & lane_init_en) == OSI_NONE) { - /* exit loop */ - cond = COND_MET; - } else { - osi_core->osd_ops.udelay(500U); + val = osi_readla(osi_core, + (nveu8_t *)xpcs_base + XPCS_WRAP_UPHY_HW_INIT_CTRL); + if ((val & lane_init_en) == OSI_NONE) { + /* exit loop */ + cond = COND_MET; + } else { + /* Max wait time is 1usec. + * Most of the time loop got exited in first iteration. + * but added an extra count of 4 for safer side + */ + osi_core->osd_ops.udelay(1U); + } } } - return 0; +fail: + return ret; } /** @@ -285,15 +300,17 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core, static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core) { void *xpcs_base = osi_core->xpcs_base; - nveu32_t retry = XPCS_RETRY_COUNT; + nveu32_t retry = RETRY_COUNT; nve32_t cond = COND_NOT_MET; nveu32_t val = 0; nveu32_t count; + nve32_t ret = 0; count = 0; while (cond == COND_NOT_MET) { if (count > retry) { - return -1; + ret = -1; + goto fail; } count++; @@ -304,14 +321,19 @@ static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core) /* exit loop */ cond = COND_MET; } else { - osi_core->osd_ops.udelay(500U); + /* Maximum wait delay as per HW team is 1msec. 
+ * So add a loop for 1000 iterations with 1usec delay, + * so that if check get satisfies before 1msec will come + * out of loop and it can save some boot time + */ + osi_core->osd_ops.udelay(1U); } } /* Clear the status */ osi_writela(osi_core, val, (nveu8_t *)xpcs_base + XPCS_WRAP_IRQ_STATUS); - - return 0; +fail: + return ret; } /** @@ -327,16 +349,19 @@ static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core) */ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core) { - unsigned int retry = 1000; - unsigned int count; + struct core_local *l_core = (struct core_local *)(void *)osi_core; + nveu32_t retry = 7U; + nveu32_t count; nveu32_t val = 0; - int cond; + nve32_t cond; + nve32_t ret = 0; if (xpcs_uphy_lane_bring_up(osi_core, XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "UPHY TX lane bring-up failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } val = osi_readla(osi_core, @@ -389,7 +414,8 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core) count = 0; while (cond == COND_NOT_MET) { if (count > retry) { - return -1; + ret = -1; + goto fail; } count++; @@ -397,10 +423,17 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core) val = osi_readla(osi_core, (nveu8_t *)osi_core->xpcs_base + XPCS_WRAP_UPHY_RX_CONTROL_0_0); - if ((val & XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_CAL_EN) == 0) { + if ((val & XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_CAL_EN) == 0U) { cond = COND_MET; } else { - osi_core->osd_ops.udelay(1000U); + /* Maximum wait delay as per HW team is 100 usec. + * But most of the time as per experiments it takes + * around 14usec to satisy the condition, so add a + * minimum delay of 14usec and loop it for 7times. + * With this 14usec delay condition gets satifies + * in first iteration itself. 
+ */ + osi_core->osd_ops.udelay(14U); } } @@ -433,12 +466,20 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core) XPCS_WRAP_UPHY_RX_CONTROL_0_0); if (xpcs_check_pcs_lock_status(osi_core) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "Failed to get PCS block lock\n", 0ULL); - return -1; + if (l_core->lane_status == OSI_ENABLE) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "Failed to get PCS block lock\n", 0ULL); + l_core->lane_status = OSI_DISABLE; + } + ret = -1; + goto fail; + } else { + OSI_CORE_INFO(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "PCS block lock SUCCESS\n", 0ULL); + l_core->lane_status = OSI_ENABLE; } - - return 0; +fail: + return ret; } /** @@ -451,28 +492,25 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core) * @retval 0 on success * @retval -1 on failure. */ -int xpcs_init(struct osi_core_priv_data *osi_core) +nve32_t xpcs_init(struct osi_core_priv_data *osi_core) { void *xpcs_base = osi_core->xpcs_base; - unsigned int retry = 1000; - unsigned int count; - unsigned int ctrl = 0; - int cond = 1; - int ret = 0; + nveu32_t retry = 1000; + nveu32_t count; + nveu32_t ctrl = 0; + nve32_t cond = 1; + nve32_t ret = 0; if (osi_core->xpcs_base == OSI_NULL) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, "XPCS base is NULL", 0ULL); - /* TODO: Remove this once silicon arrives */ - return 0; + ret = -1; + goto fail; } - if (osi_core->pre_si != OSI_ENABLE) { - if (xpcs_lane_bring_up(osi_core) < 0) { - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "TX/RX lane bring-up failed\n", 0ULL); - return -1; - } + if (xpcs_lane_bring_up(osi_core) < 0) { + ret = -1; + goto fail; } /* Switching to USXGMII Mode based on @@ -484,7 +522,7 @@ int xpcs_init(struct osi_core_priv_data *osi_core) ctrl |= XPCS_SR_XS_PCS_CTRL2_PCS_TYPE_SEL_BASE_R; ret = xpcs_write_safety(osi_core, XPCS_SR_XS_PCS_CTRL2, ctrl); if (ret != 0) { - return ret; + goto fail; } /* 2. 
enable USXGMII Mode inside DWC_xpcs */ @@ -501,7 +539,7 @@ int xpcs_init(struct osi_core_priv_data *osi_core) ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_KR_CTRL, ctrl); if (ret != 0) { - return ret; + goto fail; } /* 4. Program PHY to operate at 10Gbps/5Gbps/2Gbps * this step not required since PHY speed programming @@ -512,7 +550,7 @@ int xpcs_init(struct osi_core_priv_data *osi_core) ctrl |= XPCS_VR_XS_PCS_DIG_CTRL1_USXG_EN; ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_DIG_CTRL1, ctrl); if (ret != 0) { - return ret; + goto fail; } /* XPCS_VR_XS_PCS_DIG_CTRL1_VR_RST bit is self clearing @@ -528,7 +566,8 @@ int xpcs_init(struct osi_core_priv_data *osi_core) count = 0; while (cond == 1) { if (count > retry) { - return -1; + ret = -1; + goto fail; } count++; @@ -551,13 +590,13 @@ int xpcs_init(struct osi_core_priv_data *osi_core) ctrl &= ~XPCS_SR_AN_CTRL_AN_EN; ret = xpcs_write_safety(osi_core, XPCS_SR_AN_CTRL, ctrl); if (ret != 0) { - return ret; + goto fail; } ctrl = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_DIG_CTRL1); ctrl |= XPCS_VR_XS_PCS_DIG_CTRL1_CL37_BP; ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_DIG_CTRL1, ctrl); if (ret != 0) { - return ret; + goto fail; } } @@ -569,10 +608,11 @@ int xpcs_init(struct osi_core_priv_data *osi_core) /* 11. XPCS configured as MAC-side USGMII - NA */ /* 13. TODO: If there is interrupt enabled for AN interrupt */ - - return 0; +fail: + return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief xpcs_eee - XPCS enable/disable EEE * @@ -585,54 +625,55 @@ int xpcs_init(struct osi_core_priv_data *osi_core) * @retval 0 on success * @retval -1 on failure. 
*/ -int xpcs_eee(struct osi_core_priv_data *osi_core, unsigned int en_dis) +nve32_t xpcs_eee(struct osi_core_priv_data *osi_core, nveu32_t en_dis) { void *xpcs_base = osi_core->xpcs_base; - unsigned int val = 0x0U; - int ret = 0; + nveu32_t val = 0x0U; + nve32_t ret = 0; - if (en_dis != OSI_ENABLE && en_dis != OSI_DISABLE) { - return -1; + if ((en_dis != OSI_ENABLE) && (en_dis != OSI_DISABLE)) { + ret = -1; + goto fail; } - if (xpcs_base == OSI_NULL) - return -1; + if (xpcs_base == OSI_NULL) { + ret = -1; + goto fail; + } if (en_dis == OSI_DISABLE) { val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL0); val &= ~XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN; val &= ~XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN; ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL0, val); + } else { + + /* 1. Check if DWC_xpcs supports the EEE feature by + * reading the SR_XS_PCS_EEE_ABL register + * 1000BASEX-Only is different config then else so can (skip) + */ + + /* 2. Program various timers used in the EEE mode depending on the + * clk_eee_i clock frequency. default times are same as IEEE std + * clk_eee_i() is 102MHz. MULT_FACT_100NS = 9 because 9.8ns*10 = 98 + * which is between 80 and 120 this leads to default setting match + */ + + val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL0); + /* 3. If FEC is enabled in the KR mode (skip in FPGA)*/ + /* 4. enable the EEE feature on the Tx path and Rx path */ + val |= (XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN | + XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN); + ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL0, val); if (ret != 0) { - return ret; + goto fail; } - return 0; - } - - /* 1. Check if DWC_xpcs supports the EEE feature by - * reading the SR_XS_PCS_EEE_ABL register - * 1000BASEX-Only is different config then else so can (skip) */ - - /* 2. Program various timers used in the EEE mode depending on the - * clk_eee_i clock frequency. default times are same as IEEE std - * clk_eee_i() is 102MHz. 
MULT_FACT_100NS = 9 because 9.8ns*10 = 98 - * which is between 80 and 120 this leads to default setting match */ - - val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL0); - /* 3. If FEC is enabled in the KR mode (skip in FPGA)*/ - /* 4. enable the EEE feature on the Tx path and Rx path */ - val |= (XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN | - XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN); - ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL0, val); - if (ret != 0) { - return ret; - } - /* Transparent Tx LPI Mode Enable */ - val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL1); - val |= XPCS_VR_XS_PCS_EEE_MCTRL1_TRN_LPI; - ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL1, val); - if (ret != 0) { - return ret; + /* Transparent Tx LPI Mode Enable */ + val = xpcs_read(xpcs_base, XPCS_VR_XS_PCS_EEE_MCTRL1); + val |= XPCS_VR_XS_PCS_EEE_MCTRL1_TRN_LPI; + ret = xpcs_write_safety(osi_core, XPCS_VR_XS_PCS_EEE_MCTRL1, val); } - return 0; +fail: + return ret; } +#endif /* !OSI_STRIPPED_LIB */ diff --git a/kernel/nvethernetrm/osi/core/xpcs.h b/kernel/nvethernetrm/osi/core/xpcs.h index 070e44147a..f04ad0b075 100644 --- a/kernel/nvethernetrm/osi/core/xpcs.h +++ b/kernel/nvethernetrm/osi/core/xpcs.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -26,15 +26,6 @@ #include "../osi/common/common.h" #include -/** - * @addtogroup XPCS helper macros - * - * @brief XPCS helper macros. 
- * @{ - */ -#define XPCS_RETRY_COUNT (RETRY_COUNT * (2U)) -/** @} */ - /** * @addtogroup XPCS Register offsets * @@ -42,24 +33,27 @@ * @{ */ #define XPCS_ADDRESS 0x03FC -#define XPCS_SR_XS_PCS_CTRL1 0xC0000 #define XPCS_SR_XS_PCS_STS1 0xC0004 #define XPCS_SR_XS_PCS_CTRL2 0xC001C -#define XPCS_SR_XS_PCS_EEE_ABL 0xC0050 -#define XPCS_SR_XS_PCS_EEE_ABL2 0xC0054 #define XPCS_VR_XS_PCS_DIG_CTRL1 0xE0000 #define XPCS_VR_XS_PCS_KR_CTRL 0xE001C #define XPCS_SR_AN_CTRL 0x1C0000 #define XPCS_SR_MII_CTRL 0x7C0000 #define XPCS_VR_MII_AN_INTR_STS 0x7E0008 -#define XPCS_VR_XS_PCS_EEE_MCTRL0 0xE0018 -#define XPCS_VR_XS_PCS_EEE_MCTRL1 0xE002C #define XPCS_WRAP_UPHY_HW_INIT_CTRL 0x8020 #define XPCS_WRAP_UPHY_STATUS 0x8044 #define XPCS_WRAP_IRQ_STATUS 0x8050 #define XPCS_WRAP_UPHY_RX_CONTROL_0_0 0x801C /** @} */ +#ifndef OSI_STRIPPED_LIB +#define XPCS_VR_XS_PCS_EEE_MCTRL0 0xE0018 +#define XPCS_VR_XS_PCS_EEE_MCTRL1 0xE002C + +#define XPCS_VR_XS_PCS_EEE_MCTRL1_TRN_LPI OSI_BIT(0) +#define XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN OSI_BIT(0) +#define XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN OSI_BIT(1) +#endif /* !OSI_STRIPPED_LIB */ /** * @addtogroup XPCS-BIT Register bit fileds @@ -67,16 +61,12 @@ * @brief XPCS register bit fields * @{ */ -#define XPCS_SR_XS_PCS_CTRL1_RST OSI_BIT(15) #define XPCS_SR_XS_PCS_CTRL2_PCS_TYPE_SEL_BASE_R 0x0U #define XPCS_SR_XS_PCS_STS1_RLU OSI_BIT(2) #define XPCS_VR_XS_PCS_DIG_CTRL1_USXG_EN OSI_BIT(9) #define XPCS_VR_XS_PCS_DIG_CTRL1_VR_RST OSI_BIT(15) #define XPCS_VR_XS_PCS_DIG_CTRL1_USRA_RST OSI_BIT(10) #define XPCS_VR_XS_PCS_DIG_CTRL1_CL37_BP OSI_BIT(12) -#define XPCS_VR_XS_PCS_EEE_MCTRL1_TRN_LPI OSI_BIT(0) -#define XPCS_VR_XS_PCS_EEE_MCTRL0_LTX_EN OSI_BIT(0) -#define XPCS_VR_XS_PCS_EEE_MCTRL0_LRX_EN OSI_BIT(1) #define XPCS_SR_AN_CTRL_AN_EN OSI_BIT(12) #define XPCS_SR_MII_CTRL_AN_ENABLE OSI_BIT(12) #define XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR OSI_BIT(0) @@ -95,7 +85,6 @@ OSI_BIT(10)) #define XPCS_VR_XS_PCS_KR_CTRL_USXG_MODE_5G OSI_BIT(10) #define 
XPCS_WRAP_UPHY_HW_INIT_CTRL_TX_EN OSI_BIT(0) -#define XPCS_WRAP_UPHY_HW_INIT_CTRL_RX_EN OSI_BIT(2) #define XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS OSI_BIT(6) #define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_DATA_EN OSI_BIT(0) #define XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_IDDQ OSI_BIT(4) @@ -114,20 +103,19 @@ #define XPCS_CORE_CORRECTABLE_ERR OSI_BIT(10) #define XPCS_CORE_UNCORRECTABLE_ERR OSI_BIT(9) #define XPCS_REGISTER_PARITY_ERR OSI_BIT(8) -#define XPCS_BASE_PMA_MMD_SR_PMA_KR_FEC_CTRL 0x402AC -#define EN_ERR_IND OSI_BIT(1) -#define FEC_EN OSI_BIT(0) #define XPCS_VR_XS_PCS_SFTY_UE_INTR0 0xE03C0 #define XPCS_VR_XS_PCS_SFTY_CE_INTR 0xE03C8 #define XPCS_VR_XS_PCS_SFTY_TMR_CTRL 0xE03D4 -#define XPCS_SFTY_1US_MULT_MASK 0xFF +#define XPCS_SFTY_1US_MULT_MASK 0xFFU #define XPCS_SFTY_1US_MULT_SHIFT 0U #endif /** @} */ -int xpcs_init(struct osi_core_priv_data *osi_core); -int xpcs_start(struct osi_core_priv_data *osi_core); -int xpcs_eee(struct osi_core_priv_data *osi_core, unsigned int en_dis); +nve32_t xpcs_init(struct osi_core_priv_data *osi_core); +nve32_t xpcs_start(struct osi_core_priv_data *osi_core); +#ifndef OSI_STRIPPED_LIB +nve32_t xpcs_eee(struct osi_core_priv_data *osi_core, nveu32_t en_dis); +#endif /* !OSI_STRIPPED_LIB */ /** * @brief xpcs_read - read from xpcs. @@ -139,11 +127,11 @@ int xpcs_eee(struct osi_core_priv_data *osi_core, unsigned int en_dis); * * @retval value read from xpcs register. 
*/ -static inline unsigned int xpcs_read(void *xpcs_base, unsigned int reg_addr) +static inline nveu32_t xpcs_read(void *xpcs_base, nveu32_t reg_addr) { osi_writel(((reg_addr >> XPCS_REG_ADDR_SHIFT) & XPCS_REG_ADDR_MASK), - ((unsigned char *)xpcs_base + XPCS_ADDRESS)); - return osi_readl((unsigned char *)xpcs_base + + ((nveu8_t *)xpcs_base + XPCS_ADDRESS)); + return osi_readl((nveu8_t *)xpcs_base + ((reg_addr) & XPCS_REG_VALUE_MASK)); } @@ -156,12 +144,12 @@ static inline unsigned int xpcs_read(void *xpcs_base, unsigned int reg_addr) * @param[in] reg_addr: register address for writing * @param[in] val: write value to register address */ -static inline void xpcs_write(void *xpcs_base, unsigned int reg_addr, - unsigned int val) +static inline void xpcs_write(void *xpcs_base, nveu32_t reg_addr, + nveu32_t val) { osi_writel(((reg_addr >> XPCS_REG_ADDR_SHIFT) & XPCS_REG_ADDR_MASK), - ((unsigned char *)xpcs_base + XPCS_ADDRESS)); - osi_writel(val, (unsigned char *)xpcs_base + + ((nveu8_t *)xpcs_base + XPCS_ADDRESS)); + osi_writel(val, (nveu8_t *)xpcs_base + (((reg_addr) & XPCS_REG_VALUE_MASK))); } @@ -176,28 +164,33 @@ static inline void xpcs_write(void *xpcs_base, unsigned int reg_addr, * @param[in] val: write value to register address * * @retval 0 on success - * @retval -1 on failure. 
+ * @retval XPCS_WRITE_FAIL_CODE on failure * */ -static inline int xpcs_write_safety(struct osi_core_priv_data *osi_core, - unsigned int reg_addr, - unsigned int val) +static inline nve32_t xpcs_write_safety(struct osi_core_priv_data *osi_core, + nveu32_t reg_addr, + nveu32_t val) { void *xpcs_base = osi_core->xpcs_base; - unsigned int read_val; - int retry = 10; + nveu32_t read_val; + nve32_t retry = 10; + nve32_t ret = XPCS_WRITE_FAIL_CODE; while (--retry > 0) { xpcs_write(xpcs_base, reg_addr, val); read_val = xpcs_read(xpcs_base, reg_addr); if (val == read_val) { - return 0; + ret = 0; + break; } osi_core->osd_ops.udelay(OSI_DELAY_1US); } - OSI_CORE_ERR(OSI_NULL, OSI_LOG_ARG_HW_FAIL, - "xpcs_write_safety failed", reg_addr); - return -1; + if (ret != 0) { + OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL, + "xpcs_write_safety failed", reg_addr); + } + + return ret; } #endif diff --git a/kernel/nvethernetrm/osi/dma/Makefile.interface.tmk b/kernel/nvethernetrm/osi/dma/Makefile.interface.tmk index c12901e429..693774c7bb 100644 --- a/kernel/nvethernetrm/osi/dma/Makefile.interface.tmk +++ b/kernel/nvethernetrm/osi/dma/Makefile.interface.tmk @@ -1,6 +1,6 @@ ################################### tell Emacs this is a -*- makefile-gmake -*- # -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. 
# # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), @@ -26,7 +26,11 @@ ifdef NV_INTERFACE_FLAG_SHARED_LIBRARY_SECTION NV_INTERFACE_NAME := nvethernetcl +ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY), 0) NV_INTERFACE_EXPORTS := lib$(NV_INTERFACE_NAME) +else +NV_INTERFACE_EXPORTS := lib$(NV_INTERFACE_NAME)_safety +endif NV_INTERFACE_PUBLIC_INCLUDES := \ ./include endif diff --git a/kernel/nvethernetrm/osi/dma/Makefile.tmk b/kernel/nvethernetrm/osi/dma/Makefile.tmk index 7e1e52d7c0..94ffaad1d4 100644 --- a/kernel/nvethernetrm/osi/dma/Makefile.tmk +++ b/kernel/nvethernetrm/osi/dma/Makefile.tmk @@ -1,6 +1,6 @@ ################################### tell Emacs this is a -*- makefile-gmake -*- # -# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), @@ -30,13 +30,10 @@ NV_COMPONENT_STRICT_WARNINGS_qnx_64 := 1 NV_COMPONENT_NAME := nvethernetcl NV_COMPONENT_OWN_INTERFACE_DIR := . 
NV_COMPONENT_SOURCES := \ - eqos_dma.c \ - osi_dma.c \ - osi_dma_txrx.c \ - mgbe_dma.c \ - eqos_desc.c \ - mgbe_desc.c \ - debug.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/osi_dma.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/osi_dma_txrx.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/eqos_desc.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/mgbe_desc.c \ $(NV_SOURCE)/nvethernetrm/osi/common/osi_common.c \ $(NV_SOURCE)/nvethernetrm/osi/common/eqos_common.c \ $(NV_SOURCE)/nvethernetrm/osi/common/mgbe_common.c @@ -45,10 +42,17 @@ NV_COMPONENT_INCLUDES := \ $(NV_SOURCE)/nvethernetrm/include \ $(NV_SOURCE)/nvethernetrm/osi/common/include -ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY),0) -NV_COMPONENT_CFLAGS += -DOSI_DEBUG +include $(NV_SOURCE)/nvethernetrm/include/config.tmk + +ifeq ($(OSI_DEBUG),1) +NV_COMPONENT_SOURCES += $(NV_SOURCE)/nvethernetrm/osi/dma/debug.c endif +ifeq ($(OSI_STRIPPED_LIB),0) +NV_COMPONENT_SOURCES += \ + $(NV_SOURCE)/nvethernetrm/osi/dma/mgbe_dma.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/eqos_dma.c +endif include $(NV_BUILD_SHARED_LIBRARY) endif diff --git a/kernel/nvethernetrm/osi/dma/debug.c b/kernel/nvethernetrm/osi/dma/debug.c index 3ccb451158..5b0fa232d9 100644 --- a/kernel/nvethernetrm/osi/dma/debug.c +++ b/kernel/nvethernetrm/osi/dma/debug.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -35,7 +35,7 @@ static void dump_struct(struct osi_dma_priv_data *osi_dma, unsigned char *ptr, unsigned long size) { - nveu32_t i = 0, rem, j; + nveu32_t i = 0, rem, j = 0; unsigned long temp; if (ptr == OSI_NULL) { @@ -129,7 +129,9 @@ void reg_dump(struct osi_dma_priv_data *osi_dma) max_addr = 0x14EC; break; case OSI_MGBE_MAC_3_10: +#ifndef OSI_STRIPPED_LIB case OSI_MGBE_MAC_4_00: +#endif addr = 0x3100; max_addr = 0x35FC; break; @@ -205,9 +207,9 @@ static void tx_desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx, int cnt; if (f_idx > l_idx) { - cnt = l_idx + osi_dma->tx_ring_sz - f_idx; + cnt = (int)(l_idx + osi_dma->tx_ring_sz - f_idx); } else { - cnt = l_idx - f_idx; + cnt = (int)(l_idx - f_idx); } for (i = f_idx; cnt >= 0; cnt--) { @@ -250,6 +252,8 @@ void desc_dump(struct osi_dma_priv_data *osi_dma, unsigned int f_idx, rx_desc_dump(osi_dma, f_idx, chan); break; default: + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "Invalid desc dump flag\n", 0ULL); break; } } diff --git a/kernel/nvethernetrm/osi/dma/dma_local.h b/kernel/nvethernetrm/osi/dma/dma_local.h index 465fd03702..b73d6afd5c 100644 --- a/kernel/nvethernetrm/osi/dma/dma_local.h +++ b/kernel/nvethernetrm/osi/dma/dma_local.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,8 +24,10 @@ #ifndef INCLUDED_DMA_LOCAL_H #define INCLUDED_DMA_LOCAL_H +#include "../osi/common/common.h" #include #include "eqos_dma.h" +#include "mgbe_dma.h" /** * @brief Maximum number of OSI DMA instances. 
@@ -46,56 +48,17 @@ * @brief MAC DMA Channel operations */ struct dma_chan_ops { - /** Called to set Transmit Ring length */ - void (*set_tx_ring_len)(struct osi_dma_priv_data *osi_dma, - nveu32_t chan, - nveu32_t len); - /** Called to set Transmit Ring Base address */ - void (*set_tx_ring_start_addr)(void *addr, nveu32_t chan, - nveu64_t base_addr); - /** Called to update Tx Ring tail pointer */ - void (*update_tx_tailptr)(void *addr, nveu32_t chan, - nveu64_t tailptr); - /** Called to set Receive channel ring length */ - void (*set_rx_ring_len)(struct osi_dma_priv_data *osi_dma, - nveu32_t chan, - nveu32_t len); - /** Called to set receive channel ring base address */ - void (*set_rx_ring_start_addr)(void *addr, nveu32_t chan, - nveu64_t base_addr); - /** Called to update Rx ring tail pointer */ - void (*update_rx_tailptr)(void *addr, nveu32_t chan, - nveu64_t tailptr); - /** Called to disable DMA Tx channel interrupts at wrapper level */ - void (*disable_chan_tx_intr)(void *addr, nveu32_t chan); - /** Called to enable DMA Tx channel interrupts at wrapper level */ - void (*enable_chan_tx_intr)(void *addr, nveu32_t chan); - /** Called to disable DMA Rx channel interrupts at wrapper level */ - void (*disable_chan_rx_intr)(void *addr, nveu32_t chan); - /** Called to enable DMA Rx channel interrupts at wrapper level */ - void (*enable_chan_rx_intr)(void *addr, nveu32_t chan); - /** Called to start the Tx/Rx DMA */ - void (*start_dma)(struct osi_dma_priv_data *osi_dma, nveu32_t chan); - /** Called to stop the Tx/Rx DMA */ - void (*stop_dma)(struct osi_dma_priv_data *osi_dma, nveu32_t chan); - /** Called to initialize the DMA channel */ - nve32_t (*init_dma_channel)(struct osi_dma_priv_data *osi_dma); - /** Called to set Rx buffer length */ - void (*set_rx_buf_len)(struct osi_dma_priv_data *osi_dma); #ifndef OSI_STRIPPED_LIB - /** Called periodically to read and validate safety critical - * registers against last written value */ - nve32_t (*validate_regs)(struct 
osi_dma_priv_data *osi_dma); /** Called to configure the DMA channel slot function */ void (*config_slot)(struct osi_dma_priv_data *osi_dma, nveu32_t chan, nveu32_t set, nveu32_t interval); #endif /* !OSI_STRIPPED_LIB */ - /** Called to clear VM Tx interrupt */ - void (*clear_vm_tx_intr)(void *addr, nveu32_t chan); - /** Called to clear VM Rx interrupt */ - void (*clear_vm_rx_intr)(void *addr, nveu32_t chan); +#ifdef OSI_DEBUG + /** Called to enable/disable debug interrupt */ + void (*debug_intr_config)(struct osi_dma_priv_data *osi_dma); +#endif }; /** @@ -103,8 +66,9 @@ struct dma_chan_ops { */ struct desc_ops { /** Called to get receive checksum */ - void (*get_rx_csum)(struct osi_rx_desc *rx_desc, + void (*get_rx_csum)(const struct osi_rx_desc *const rx_desc, struct osi_rx_pkt_cx *rx_pkt_cx); +#ifndef OSI_STRIPPED_LIB /** Called to get rx error stats */ void (*update_rx_err_stats)(struct osi_rx_desc *rx_desc, struct osi_pkt_err_stats *stats); @@ -114,11 +78,12 @@ struct desc_ops { /** Called to get rx HASH from descriptor */ void (*get_rx_hash)(struct osi_rx_desc *rx_desc, struct osi_rx_pkt_cx *rx_pkt_cx); +#endif /* !OSI_STRIPPED_LIB */ /** Called to get RX hw timestamp */ - int (*get_rx_hwstamp)(struct osi_dma_priv_data *osi_dma, - struct osi_rx_desc *rx_desc, - struct osi_rx_desc *context_desc, - struct osi_rx_pkt_cx *rx_pkt_cx); + nve32_t (*get_rx_hwstamp)(const struct osi_dma_priv_data *const osi_dma, + const struct osi_rx_desc *const rx_desc, + const struct osi_rx_desc *const context_desc, + struct osi_rx_pkt_cx *rx_pkt_cx); }; /** @@ -139,14 +104,15 @@ struct dma_local { nveu32_t init_done; /** Holds the MAC version of MAC controller */ nveu32_t mac_ver; - /** Represents whether DMA interrupts are VM or Non-VM */ - nveu32_t vm_intr; /** Magic number to validate osi_dma pointer */ nveu64_t magic_num; /** Maximum number of DMA channels */ - nveu32_t max_chans; + nveu32_t num_max_chans; + /** Exact MAC used across SOCs 0:Legacy EQOS, 1:Orin EQOS, 2:Orin 
MGBE */ + nveu32_t l_mac_ver; }; +#ifndef OSI_STRIPPED_LIB /** * @brief eqos_init_dma_chan_ops - Initialize eqos DMA operations. * @@ -172,18 +138,19 @@ void eqos_init_dma_chan_ops(struct dma_chan_ops *ops); * - De-initialization: No */ void mgbe_init_dma_chan_ops(struct dma_chan_ops *ops); +#endif /* !OSI_STRIPPED_LIB */ /** * @brief eqos_get_desc_ops - EQOS init DMA descriptor operations */ -void eqos_init_desc_ops(struct desc_ops *d_ops); +void eqos_init_desc_ops(struct desc_ops *p_dops); /** * @brief mgbe_get_desc_ops - MGBE init DMA descriptor operations */ -void mgbe_init_desc_ops(struct desc_ops *d_ops); +void mgbe_init_desc_ops(struct desc_ops *p_dops); -nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma); +nve32_t init_desc_ops(const struct osi_dma_priv_data *const osi_dma); /** * @brief osi_hw_transmit - Initialize Tx DMA descriptors for a channel @@ -196,8 +163,7 @@ nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma); * * @param[in, out] osi_dma: OSI DMA private data. * @param[in] tx_ring: DMA Tx ring. - * @param[in] ops: DMA channel operations. - * @param[in] chan: DMA Tx channel number. Max OSI_EQOS_MAX_NUM_CHANS. + * @param[in] dma_chan: DMA Tx channel number. Max OSI_EQOS_MAX_NUM_CHANS. * * @note * API Group: @@ -207,8 +173,7 @@ nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma); */ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, struct osi_tx_ring *tx_ring, - struct dma_chan_ops *ops, - nveu32_t chan); + nveu32_t dma_chan); /* Function prototype needed for misra */ @@ -232,41 +197,36 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, * @retval 0 on success * @retval -1 on failure. 
*/ -nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma, - struct dma_chan_ops *ops); +nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma); static inline nveu32_t is_power_of_two(nveu32_t num) { + nveu32_t ret = OSI_DISABLE; + if ((num > 0U) && ((num & (num - 1U)) == 0U)) { - return OSI_ENABLE; + ret = OSI_ENABLE; } - return OSI_DISABLE; + return ret; } -/** - * @addtogroup Helper Helper MACROS - * - * @brief EQOS generic helper MACROS. - * @{ - */ -#define CHECK_CHAN_BOUND(chan) \ - { \ - if ((chan) >= OSI_EQOS_MAX_NUM_CHANS) { \ - return; \ - } \ - } - -#define MGBE_CHECK_CHAN_BOUND(chan) \ -{ \ - if ((chan) >= OSI_MGBE_MAX_NUM_CHANS) { \ - return; \ - } \ -} \ - #define BOOLEAN_FALSE (0U != 0U) #define L32(data) ((nveu32_t)((data) & 0xFFFFFFFFU)) #define H32(data) ((nveu32_t)(((data) & 0xFFFFFFFF00000000UL) >> 32UL)) + +static inline void update_rx_tail_ptr(const struct osi_dma_priv_data *const osi_dma, + nveu32_t dma_chan, + nveu64_t tailptr) +{ + nveu32_t chan = dma_chan & 0xFU; + const nveu32_t tail_ptr_reg[2] = { + EQOS_DMA_CHX_RDTP(chan), + MGBE_DMA_CHX_RDTLP(chan) + }; + + osi_writel(L32(tailptr), (nveu8_t *)osi_dma->base + tail_ptr_reg[osi_dma->mac]); +} + /** @} */ #endif /* INCLUDED_DMA_LOCAL_H */ diff --git a/kernel/nvethernetrm/osi/dma/eqos_desc.c b/kernel/nvethernetrm/osi/dma/eqos_desc.c index f45b200f44..ed09014cac 100644 --- a/kernel/nvethernetrm/osi/dma/eqos_desc.c +++ b/kernel/nvethernetrm/osi/dma/eqos_desc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,6 +23,7 @@ #include "dma_local.h" #include "hw_desc.h" +#ifndef OSI_STRIPPED_LIB /** * @brief eqos_get_rx_vlan - Get Rx VLAN from descriptor * @@ -77,6 +78,22 @@ static inline void eqos_update_rx_err_stats(struct osi_rx_desc *rx_desc, } } +/** + * @brief eqos_get_rx_hash - Get Rx packet hash from descriptor if valid + * + * Algorithm: This routine will be invoked by OSI layer itself to get received + * packet Hash from descriptor if RSS hash is valid and it also sets the type + * of RSS hash. + * + * @param[in] rx_desc: Rx Descriptor. + * @param[in] rx_pkt_cx: Per-Rx packet context structure + */ +static void eqos_get_rx_hash(OSI_UNUSED struct osi_rx_desc *rx_desc, + OSI_UNUSED struct osi_rx_pkt_cx *rx_pkt_cx) +{ +} +#endif /* !OSI_STRIPPED_LIB */ + /** * @brief eqos_get_rx_csum - Get the Rx checksum from descriptor if valid * @@ -98,7 +115,7 @@ static inline void eqos_update_rx_err_stats(struct osi_rx_desc *rx_desc, * @param[in, out] rx_desc: Rx descriptor * @param[in, out] rx_pkt_cx: Per-Rx packet context structure */ -static void eqos_get_rx_csum(struct osi_rx_desc *rx_desc, +static void eqos_get_rx_csum(const struct osi_rx_desc *const rx_desc, struct osi_rx_pkt_cx *rx_pkt_cx) { nveu32_t pkt_type; @@ -108,66 +125,49 @@ static void eqos_get_rx_csum(struct osi_rx_desc *rx_desc, * Set none/unnecessary bit as well for other OS to check and * take proper actions. 
*/ - if ((rx_desc->rdes3 & RDES3_RS1V) != RDES3_RS1V) { - return; - } + if ((rx_desc->rdes3 & RDES3_RS1V) == RDES3_RS1V) { + if ((rx_desc->rdes1 & + (RDES1_IPCE | RDES1_IPCB | RDES1_IPHE)) == OSI_DISABLE) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY; + } - if ((rx_desc->rdes1 & - (RDES1_IPCE | RDES1_IPCB | RDES1_IPHE)) == OSI_DISABLE) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY; - } + if ((rx_desc->rdes1 & RDES1_IPCB) != RDES1_IPCB) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4; + if ((rx_desc->rdes1 & RDES1_IPHE) == RDES1_IPHE) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4_BAD; + } - if ((rx_desc->rdes1 & RDES1_IPCB) != OSI_DISABLE) { - return; - } + pkt_type = rx_desc->rdes1 & RDES1_PT_MASK; + if ((rx_desc->rdes1 & RDES1_IPV4) == RDES1_IPV4) { + if (pkt_type == RDES1_PT_UDP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv4; + } else if (pkt_type == RDES1_PT_TCP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv4; - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4; - if ((rx_desc->rdes1 & RDES1_IPHE) == RDES1_IPHE) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4_BAD; - } + } else { + /* Do nothing */ + } + } else if ((rx_desc->rdes1 & RDES1_IPV6) == RDES1_IPV6) { + if (pkt_type == RDES1_PT_UDP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv6; + } else if (pkt_type == RDES1_PT_TCP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv6; + + } else { + /* Do nothing */ + } - pkt_type = rx_desc->rdes1 & RDES1_PT_MASK; - if ((rx_desc->rdes1 & RDES1_IPV4) == RDES1_IPV4) { - if (pkt_type == RDES1_PT_UDP) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv4; - } else if (pkt_type == RDES1_PT_TCP) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv4; + } else { + /* Do nothing */ + } - } else { - /* Do nothing */ - } - } else if ((rx_desc->rdes1 & RDES1_IPV6) == RDES1_IPV6) { - if (pkt_type == RDES1_PT_UDP) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv6; - } else if (pkt_type == RDES1_PT_TCP) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv6; - - } else { - /* Do nothing */ + if ((rx_desc->rdes1 & RDES1_IPCE) == RDES1_IPCE) { + 
rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCP_UDP_BAD; + } } - - } else { - /* Do nothing */ } - if ((rx_desc->rdes1 & RDES1_IPCE) == RDES1_IPCE) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCP_UDP_BAD; - } -} - -/** - * @brief eqos_get_rx_hash - Get Rx packet hash from descriptor if valid - * - * Algorithm: This routine will be invoked by OSI layer itself to get received - * packet Hash from descriptor if RSS hash is valid and it also sets the type - * of RSS hash. - * - * @param[in] rx_desc: Rx Descriptor. - * @param[in] rx_pkt_cx: Per-Rx packet context structure - */ -static void eqos_get_rx_hash(OSI_UNUSED struct osi_rx_desc *rx_desc, - OSI_UNUSED struct osi_rx_pkt_cx *rx_pkt_cx) -{ + return; } /** @@ -186,12 +186,13 @@ static void eqos_get_rx_hash(OSI_UNUSED struct osi_rx_desc *rx_desc, * @retval -1 if TimeStamp is not available * @retval 0 if TimeStamp is available. */ -static int eqos_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, - struct osi_rx_desc *rx_desc, - struct osi_rx_desc *context_desc, - struct osi_rx_pkt_cx *rx_pkt_cx) +static nve32_t eqos_get_rx_hwstamp(const struct osi_dma_priv_data *const osi_dma, + const struct osi_rx_desc *const rx_desc, + const struct osi_rx_desc *const context_desc, + struct osi_rx_pkt_cx *rx_pkt_cx) { - int retry; + nve32_t ret = 0; + nve32_t retry; /* Check for RS1V/TSA/TD valid */ if (((rx_desc->rdes3 & RDES3_RS1V) == RDES3_RS1V) && @@ -205,7 +206,8 @@ static int eqos_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, OSI_INVALID_VALUE) && (context_desc->rdes1 == OSI_INVALID_VALUE)) { - return -1; + ret = -1; + goto fail; } /* Update rx pkt context flags to indicate * PTP */ @@ -219,27 +221,31 @@ static int eqos_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, } if (retry == 10) { /* Timed out waiting for Rx timestamp */ - return -1; + ret = -1; + goto fail; } rx_pkt_cx->ns = context_desc->rdes0 + (OSI_NSEC_PER_SEC * context_desc->rdes1); if (rx_pkt_cx->ns < context_desc->rdes0) { /* Will not hit this case */ - return -1; + ret = 
-1; + goto fail; } } else { - return -1; + ret = -1; } - - return 0; +fail: + return ret; } -void eqos_init_desc_ops(struct desc_ops *d_ops) +void eqos_init_desc_ops(struct desc_ops *p_dops) { - d_ops->get_rx_csum = eqos_get_rx_csum; - d_ops->update_rx_err_stats = eqos_update_rx_err_stats; - d_ops->get_rx_vlan = eqos_get_rx_vlan; - d_ops->get_rx_hash = eqos_get_rx_hash; - d_ops->get_rx_hwstamp = eqos_get_rx_hwstamp; +#ifndef OSI_STRIPPED_LIB + p_dops->update_rx_err_stats = eqos_update_rx_err_stats; + p_dops->get_rx_vlan = eqos_get_rx_vlan; + p_dops->get_rx_hash = eqos_get_rx_hash; +#endif /* !OSI_STRIPPED_LIB */ + p_dops->get_rx_csum = eqos_get_rx_csum; + p_dops->get_rx_hwstamp = eqos_get_rx_hwstamp; } diff --git a/kernel/nvethernetrm/osi/dma/eqos_dma.c b/kernel/nvethernetrm/osi/dma/eqos_dma.c index 095ddbfe50..6f6fc6dcde 100644 --- a/kernel/nvethernetrm/osi/dma/eqos_dma.c +++ b/kernel/nvethernetrm/osi/dma/eqos_dma.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,825 +20,10 @@ * DEALINGS IN THE SOFTWARE. */ +#ifndef OSI_STRIPPED_LIB #include "../osi/common/common.h" #include "dma_local.h" #include "eqos_dma.h" -#include "../osi/common/type.h" - -/** - * @brief eqos_dma_safety_config - EQOS MAC DMA safety configuration - */ -static struct dma_func_safety eqos_dma_safety_config; - -/** - * @brief Write to safety critical register. - * - * @note - * Algorithm: - * - Acquire RW lock, so that eqos_validate_dma_regs does not run while - * updating the safety critical register. - * - call osi_writel() to actually update the memory mapped register. - * - Store the same value in eqos_dma_safety_config->reg_val[idx], so that - * this latest value will be compared when eqos_validate_dma_regs is - * scheduled. 
- * - * @param[in] osi_dma: OSI DMA private data structure. - * @param[in] val: Value to be written. - * @param[in] addr: memory mapped register address to be written to. - * @param[in] idx: Index of register corresponding to enum func_safety_dma_regs. - * - * @pre MAC has to be out of reset, and clocks supplied. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: Yes - */ -static inline void eqos_dma_safety_writel(struct osi_dma_priv_data *osi_dma, - nveu32_t val, void *addr, - nveu32_t idx) -{ - struct dma_func_safety *config = &eqos_dma_safety_config; - - osi_lock_irq_enabled(&config->dma_safety_lock); - osi_writela(osi_dma->osd, val, addr); - config->reg_val[idx] = (val & config->reg_mask[idx]); - osi_unlock_irq_enabled(&config->dma_safety_lock); -} - -/** - * @brief Initialize the eqos_dma_safety_config. - * - * @param[in] osi_dma: OSI DMA private data structure. - * - * @note - * Algorithm: - * - Populate the list of safety critical registers and provide - * - the address of the register - * - Register mask (to ignore reserved/self-critical bits in the reg). - * See eqos_validate_dma_regs which can be invoked periodically to compare - * the last written value to this register vs the actual value read when - * eqos_validate_dma_regs is scheduled. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_dma_safety_init(struct osi_dma_priv_data *osi_dma) -{ - struct dma_func_safety *config = &eqos_dma_safety_config; - nveu8_t *base = (nveu8_t *)osi_dma->base; - nveu32_t val; - nveu32_t i, idx; - - /* Initialize all reg address to NULL, since we may not use - * some regs depending on the number of DMA chans enabled. 
- */ - for (i = EQOS_DMA_CH0_CTRL_IDX; i < EQOS_MAX_DMA_SAFETY_REGS; i++) { - config->reg_addr[i] = OSI_NULL; - } - - for (i = 0U; i < osi_dma->num_dma_chans; i++) { - idx = osi_dma->dma_chans[i]; -#if 0 - CHECK_CHAN_BOUND(idx); -#endif - config->reg_addr[EQOS_DMA_CH0_CTRL_IDX + idx] = base + - EQOS_DMA_CHX_CTRL(idx); - config->reg_addr[EQOS_DMA_CH0_TX_CTRL_IDX + idx] = base + - EQOS_DMA_CHX_TX_CTRL(idx); - config->reg_addr[EQOS_DMA_CH0_RX_CTRL_IDX + idx] = base + - EQOS_DMA_CHX_RX_CTRL(idx); - config->reg_addr[EQOS_DMA_CH0_TDRL_IDX + idx] = base + - EQOS_DMA_CHX_TDRL(idx); - config->reg_addr[EQOS_DMA_CH0_RDRL_IDX + idx] = base + - EQOS_DMA_CHX_RDRL(idx); - config->reg_addr[EQOS_DMA_CH0_INTR_ENA_IDX + idx] = base + - EQOS_DMA_CHX_INTR_ENA(idx); - - config->reg_mask[EQOS_DMA_CH0_CTRL_IDX + idx] = - EQOS_DMA_CHX_CTRL_MASK; - config->reg_mask[EQOS_DMA_CH0_TX_CTRL_IDX + idx] = - EQOS_DMA_CHX_TX_CTRL_MASK; - config->reg_mask[EQOS_DMA_CH0_RX_CTRL_IDX + idx] = - EQOS_DMA_CHX_RX_CTRL_MASK; - config->reg_mask[EQOS_DMA_CH0_TDRL_IDX + idx] = - EQOS_DMA_CHX_TDRL_MASK; - config->reg_mask[EQOS_DMA_CH0_RDRL_IDX + idx] = - EQOS_DMA_CHX_RDRL_MASK; - config->reg_mask[EQOS_DMA_CH0_INTR_ENA_IDX + idx] = - EQOS_DMA_CHX_INTR_ENA_MASK; - } - - /* Initialize current power-on-reset values of these registers. */ - for (i = EQOS_DMA_CH0_CTRL_IDX; i < EQOS_MAX_DMA_SAFETY_REGS; i++) { - if (config->reg_addr[i] == OSI_NULL) { - continue; - } - val = osi_readl((nveu8_t *)config->reg_addr[i]); - config->reg_val[i] = val & config->reg_mask[i]; - } - - osi_lock_init(&config->dma_safety_lock); -} - -/** - * @brief eqos_disable_chan_tx_intr - Disables DMA Tx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. 
- * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - Mapping of physical IRQ line to DMA channel need to be maintained at - * OSDependent layer and pass corresponding channel number. - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: Yes - */ -static void eqos_disable_chan_tx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl, status; - -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - /* Clear irq before disabling */ - status = osi_readl((nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_STATUS(chan)); - if ((status & EQOS_VIRT_INTR_CHX_STATUS_TX) == - EQOS_VIRT_INTR_CHX_STATUS_TX) { - osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_TX, - (nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan)); - osi_writel(EQOS_VIRT_INTR_CHX_STATUS_TX, - (nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_STATUS(chan)); - } - - /* Disable the irq */ - cntrl = osi_readl((nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); - cntrl &= ~EQOS_VIRT_INTR_CHX_CNTRL_TX; - osi_writel(cntrl, (nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief eqos_enable_chan_tx_intr - Enable Tx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - Mapping of physical IRQ line to DMA channel need to be maintained at - * OSDependent layer and pass corresponding channel number. 
- * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_enable_chan_tx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - cntrl = osi_readl((nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); - cntrl |= EQOS_VIRT_INTR_CHX_CNTRL_TX; - osi_writel(cntrl, (nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief eqos_disable_chan_rx_intr - Disable Rx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Rx channel number. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - Mapping of physical IRQ line to DMA channel need to be maintained at - * OSDependent layer and pass corresponding channel number. - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: Yes - */ -static void eqos_disable_chan_rx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl, status; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - /* Clear irq before disabling */ - status = osi_readl((nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_STATUS(chan)); - if ((status & EQOS_VIRT_INTR_CHX_STATUS_RX) == - EQOS_VIRT_INTR_CHX_STATUS_RX) { - osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_RX, - (nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan)); - osi_writel(EQOS_VIRT_INTR_CHX_STATUS_RX, - (nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_STATUS(chan)); - } - - /* Disable irq */ - cntrl = osi_readl((nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); - cntrl &= ~EQOS_VIRT_INTR_CHX_CNTRL_RX; - osi_writel(cntrl, (nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief eqos_enable_chan_rx_intr - Enable Rx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. 
- * @param[in] chan: DMA Rx channel number. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_enable_chan_rx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - cntrl = osi_readl((nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); - cntrl |= EQOS_VIRT_INTR_CHX_CNTRL_RX; - osi_writel(cntrl, (nveu8_t *)addr + - EQOS_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief eqos_set_tx_ring_len - Set DMA Tx ring length. - * - * @note - * Algorithm: - * - Set DMA Tx channel ring length for specific channel. - * - * @param[in] osi_dma: OSI DMA private data structure. - * @param[in] chan: DMA Tx channel number. - * @param[in] len: Length. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_set_tx_ring_len(struct osi_dma_priv_data *osi_dma, - nveu32_t chan, - nveu32_t len) -{ - void *addr = osi_dma->base; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - eqos_dma_safety_writel(osi_dma, len, (nveu8_t *)addr + - EQOS_DMA_CHX_TDRL(chan), - EQOS_DMA_CH0_TDRL_IDX + chan); -} - -/** - * @brief eqos_set_tx_ring_start_addr - Set DMA Tx ring base address. - * - * @note - * Algorithm: - * - Sets DMA Tx ring base address for specific channel. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. - * @param[in] tx_desc: Tx desc base address. 
- * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_set_tx_ring_start_addr(void *addr, nveu32_t chan, - nveu64_t tx_desc) -{ - nveu64_t tmp; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - tmp = H32(tx_desc); - if (tmp < UINT_MAX) { - osi_writel((nveu32_t)tmp, (nveu8_t *)addr + - EQOS_DMA_CHX_TDLH(chan)); - } - - tmp = L32(tx_desc); - if (tmp < UINT_MAX) { - osi_writel((nveu32_t)tmp, (nveu8_t *)addr + - EQOS_DMA_CHX_TDLA(chan)); - } -} - -/** - * @brief eqos_update_tx_tailptr - Updates DMA Tx ring tail pointer. - * - * @note - * Algorithm: - * - Updates DMA Tx ring tail pointer for specific channel. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. - * @param[in] tailptr: DMA Tx ring tail pointer. - * - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_update_tx_tailptr(void *addr, nveu32_t chan, - nveu64_t tailptr) -{ - nveu64_t tmp; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - tmp = L32(tailptr); - if (tmp < UINT_MAX) { - osi_writel((nveu32_t)tmp, (nveu8_t *)addr + - EQOS_DMA_CHX_TDTP(chan)); - } -} - -/** - * @brief eqos_set_rx_ring_len - Set Rx channel ring length. - * - * @note - * Algorithm: - * - Sets DMA Rx channel ring length for specific DMA channel. - * - * @param[in] osi_dma: OSI DMA private data structure. - * @param[in] chan: DMA Rx channel number. 
- * @param[in] len: Length - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_set_rx_ring_len(struct osi_dma_priv_data *osi_dma, - nveu32_t chan, - nveu32_t len) -{ - void *addr = osi_dma->base; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - eqos_dma_safety_writel(osi_dma, len, (nveu8_t *)addr + - EQOS_DMA_CHX_RDRL(chan), - EQOS_DMA_CH0_RDRL_IDX + chan); -} - -/** - * @brief eqos_set_rx_ring_start_addr - Set DMA Rx ring base address. - * - * @note - * Algorithm: - * - Sets DMA Rx channel ring base address. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Rx channel number. - * @param[in] rx_desc: DMA Rx desc base address. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_set_rx_ring_start_addr(void *addr, nveu32_t chan, - nveu64_t rx_desc) -{ - nveu64_t tmp; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - tmp = H32(rx_desc); - if (tmp < UINT_MAX) { - osi_writel((nveu32_t)tmp, (nveu8_t *)addr + - EQOS_DMA_CHX_RDLH(chan)); - } - - tmp = L32(rx_desc); - if (tmp < UINT_MAX) { - osi_writel((nveu32_t)tmp, (nveu8_t *)addr + - EQOS_DMA_CHX_RDLA(chan)); - } -} - -/** - * @brief eqos_update_rx_tailptr - Update Rx ring tail pointer - * - * @note - * Algorithm: - * - Updates DMA Rx channel tail pointer for specific channel. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Rx channel number. 
- * @param[in] tailptr: Tail pointer - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_update_rx_tailptr(void *addr, nveu32_t chan, - nveu64_t tailptr) -{ - nveu64_t tmp; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - tmp = L32(tailptr); - if (tmp < UINT_MAX) { - osi_writel((nveu32_t)tmp, (nveu8_t *)addr + - EQOS_DMA_CHX_RDTP(chan)); - } -} - -/** - * @brief eqos_start_dma - Start DMA. - * - * @note - * Algorithm: - * - Start Tx and Rx DMA for specific channel. - * - * @param[in] osi_dma: OSI DMA private data structure. - * @param[in] chan: DMA Tx/Rx channel number. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_start_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan) -{ - nveu32_t val; - void *addr = osi_dma->base; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - /* start Tx DMA */ - val = osi_readla(osi_dma->osd, - (nveu8_t *)addr + EQOS_DMA_CHX_TX_CTRL(chan)); - val |= OSI_BIT(0); - eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr + - EQOS_DMA_CHX_TX_CTRL(chan), - EQOS_DMA_CH0_TX_CTRL_IDX + chan); - - /* start Rx DMA */ - val = osi_readla(osi_dma->osd, - (nveu8_t *)addr + EQOS_DMA_CHX_RX_CTRL(chan)); - val |= OSI_BIT(0); - val &= ~OSI_BIT(31); - eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr + - EQOS_DMA_CHX_RX_CTRL(chan), - EQOS_DMA_CH0_RX_CTRL_IDX + chan); -} - -/** - * @brief eqos_stop_dma - Stop DMA. - * - * @note - * Algorithm: - * - Start Tx and Rx DMA for specific channel. - * - * @param[in] osi_dma: OSI DMA private data structure. - * @param[in] chan: DMA Tx/Rx channel number. 
- * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - * @note - * API Group: - * - Initialization: No - * - Run time: No - * - De-initialization: Yes - */ -static void eqos_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan) -{ - nveu32_t val; - void *addr = osi_dma->base; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - /* stop Tx DMA */ - val = osi_readla(osi_dma->osd, - (nveu8_t *)addr + EQOS_DMA_CHX_TX_CTRL(chan)); - val &= ~OSI_BIT(0); - eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr + - EQOS_DMA_CHX_TX_CTRL(chan), - EQOS_DMA_CH0_TX_CTRL_IDX + chan); - - /* stop Rx DMA */ - val = osi_readla(osi_dma->osd, - (nveu8_t *)addr + EQOS_DMA_CHX_RX_CTRL(chan)); - val &= ~OSI_BIT(0); - val |= OSI_BIT(31); - eqos_dma_safety_writel(osi_dma, val, (nveu8_t *)addr + - EQOS_DMA_CHX_RX_CTRL(chan), - EQOS_DMA_CH0_RX_CTRL_IDX + chan); -} - -/** - * @brief eqos_configure_dma_channel - Configure DMA channel - * - * @note - * Algorithm: - * - This takes care of configuring the below - * parameters for the DMA channel - * - Enabling DMA channel interrupts - * - Enable 8xPBL mode - * - Program Tx, Rx PBL - * - Enable TSO if HW supports - * - Program Rx Watchdog timer - * - * @param[in] chan: DMA channel number that need to be configured. - * @param[in] osi_dma: OSI DMA private data structure. - * - * @pre MAC has to be out of reset. 
- * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - */ -static void eqos_configure_dma_channel(nveu32_t chan, - struct osi_dma_priv_data *osi_dma) -{ - nveu32_t value; -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - /* enable DMA channel interrupts */ - /* Enable TIE and TBUE */ - /* TIE - Transmit Interrupt Enable */ - /* TBUE - Transmit Buffer Unavailable Enable */ - /* RIE - Receive Interrupt Enable */ - /* RBUE - Receive Buffer Unavailable Enable */ - /* AIE - Abnormal Interrupt Summary Enable */ - /* NIE - Normal Interrupt Summary Enable */ - /* FBE - Fatal Bus Error Enable */ - value = osi_readl((nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_INTR_ENA(chan)); - if (osi_dma->use_virtualization == OSI_DISABLE) { - value |= EQOS_DMA_CHX_INTR_TBUE | - EQOS_DMA_CHX_INTR_RBUE; - } - - value |= EQOS_DMA_CHX_INTR_TIE | EQOS_DMA_CHX_INTR_RIE | - EQOS_DMA_CHX_INTR_FBEE | EQOS_DMA_CHX_INTR_AIE | - EQOS_DMA_CHX_INTR_NIE; - /* For multi-irqs to work nie needs to be disabled */ - value &= ~(EQOS_DMA_CHX_INTR_NIE); - eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_INTR_ENA(chan), - EQOS_DMA_CH0_INTR_ENA_IDX + chan); - - /* Enable 8xPBL mode */ - value = osi_readl((nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_CTRL(chan)); - value |= EQOS_DMA_CHX_CTRL_PBLX8; - eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_CTRL(chan), - EQOS_DMA_CH0_CTRL_IDX + chan); - - /* Configure DMA channel Transmit control register */ - value = osi_readl((nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_TX_CTRL(chan)); - /* Enable OSF mode */ - value |= EQOS_DMA_CHX_TX_CTRL_OSF; - /* TxPBL = 32*/ - value |= EQOS_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED; - /* enable TSO by default if HW supports */ - value |= EQOS_DMA_CHX_TX_CTRL_TSE; - - eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_TX_CTRL(chan), - EQOS_DMA_CH0_TX_CTRL_IDX + chan); - - /* Configure DMA channel Receive control 
register */ - /* Select Rx Buffer size. Needs to be rounded up to next multiple of - * bus width - */ - value = osi_readl((nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_RX_CTRL(chan)); - - /* clear previous Rx buffer size */ - value &= ~EQOS_DMA_CHX_RBSZ_MASK; - - value |= (osi_dma->rx_buf_len << EQOS_DMA_CHX_RBSZ_SHIFT); - /* RXPBL = 12 */ - value |= EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED; - eqos_dma_safety_writel(osi_dma, value, (nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_RX_CTRL(chan), - EQOS_DMA_CH0_RX_CTRL_IDX + chan); - - /* Set Receive Interrupt Watchdog Timer Count */ - /* conversion of usec to RWIT value - * Eg: System clock is 125MHz, each clock cycle would then be 8ns - * For value 0x1 in RWT, device would wait for 512 clk cycles with - * RWTU as 0x1, - * ie, (8ns x 512) => 4.096us (rounding off to 4us) - * So formula with above values is,ret = usec/4 - */ - if ((osi_dma->use_riwt == OSI_ENABLE) && - (osi_dma->rx_riwt < UINT_MAX)) { - value = osi_readl((nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_RX_WDT(chan)); - /* Mask the RWT and RWTU value */ - value &= ~(EQOS_DMA_CHX_RX_WDT_RWT_MASK | - EQOS_DMA_CHX_RX_WDT_RWTU_MASK); - /* Conversion of usec to Rx Interrupt Watchdog Timer Count */ - value |= ((osi_dma->rx_riwt * - (EQOS_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) / - EQOS_DMA_CHX_RX_WDT_RWTU) & - EQOS_DMA_CHX_RX_WDT_RWT_MASK; - value |= EQOS_DMA_CHX_RX_WDT_RWTU_512_CYCLE; - osi_writel(value, (nveu8_t *)osi_dma->base + - EQOS_DMA_CHX_RX_WDT(chan)); - } -} - -/** - * @brief eqos_init_dma_channel - DMA channel INIT - * - * @param[in] osi_dma: OSI DMA private data structure. - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. 
- */ -static nve32_t eqos_init_dma_channel(struct osi_dma_priv_data *osi_dma) -{ - nveu32_t chinx; - - eqos_dma_safety_init(osi_dma); - - /* configure EQOS DMA channels */ - for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) { - eqos_configure_dma_channel(osi_dma->dma_chans[chinx], osi_dma); - } - - return 0; -} - -/** - * @brief eqos_set_rx_buf_len - Set Rx buffer length - * Sets the Rx buffer length based on the new MTU size set. - * - * @param[in, out] osi_dma: OSI DMA private data structure. - * - * @pre - * - MAC needs to be out of reset and proper clocks need to be configured - * - DMA HW init need to be completed successfully, see osi_hw_dma_init - * - osi_dma->mtu need to be filled with current MTU size <= 9K - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - */ -static void eqos_set_rx_buf_len(struct osi_dma_priv_data *osi_dma) -{ - nveu32_t rx_buf_len = 0U; - - /* Add Ethernet header + VLAN header + NET IP align size to MTU */ - if (osi_dma->mtu <= OSI_MAX_MTU_SIZE) { - rx_buf_len = osi_dma->mtu + OSI_ETH_HLEN + NV_VLAN_HLEN + - OSI_NET_IP_ALIGN; - } else { - rx_buf_len = OSI_MAX_MTU_SIZE + OSI_ETH_HLEN + NV_VLAN_HLEN + - OSI_NET_IP_ALIGN; - } - - /* Buffer alignment */ - osi_dma->rx_buf_len = ((rx_buf_len + (EQOS_AXI_BUS_WIDTH - 1U)) & - ~(EQOS_AXI_BUS_WIDTH - 1U)); -} - -#ifndef OSI_STRIPPED_LIB -/** - * @brief Read-validate HW registers for functional safety. - * - * @note - * Algorithm: - * - Reads pre-configured list of MAC/MTL configuration registers - * and compares with last written value for any modifications. - * - * @param[in] osi_dma: OSI DMA private data structure. - * - * @pre - * - MAC has to be out of reset. - * - osi_hw_dma_init has to be called. Internally this would initialize - * the safety_config (see osi_dma_priv_data) based on MAC version and - * which specific registers needs to be validated periodically. 
- * - Invoke this call if (osi_dma_priv_data->safety_config != OSI_NULL) - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t eqos_validate_dma_regs(struct osi_dma_priv_data *osi_dma) -{ - struct dma_func_safety *config = - (struct dma_func_safety *)osi_dma->safety_config; - nveu32_t cur_val; - nveu32_t i; - - osi_lock_irq_enabled(&config->dma_safety_lock); - for (i = EQOS_DMA_CH0_CTRL_IDX; i < EQOS_MAX_DMA_SAFETY_REGS; i++) { - if (config->reg_addr[i] == OSI_NULL) { - continue; - } - - cur_val = osi_readl((nveu8_t *)config->reg_addr[i]); - cur_val &= config->reg_mask[i]; - - if (cur_val == config->reg_val[i]) { - continue; - } else { - /* Register content differs from what was written. - * Return error and let safety manager (NVGaurd etc.) - * take care of corrective action. - */ - osi_unlock_irq_enabled(&config->dma_safety_lock); - return -1; - } - } - osi_unlock_irq_enabled(&config->dma_safety_lock); - - return 0; -} /** * @brief eqos_config_slot - Configure slot Checking for DMA channel @@ -895,94 +80,66 @@ static void eqos_config_slot(struct osi_dma_priv_data *osi_dma, EQOS_DMA_CHX_SLOT_CTRL(chan)); } } -#endif /* !OSI_STRIPPED_LIB */ +#ifdef OSI_DEBUG /** - * @brief eqos_clear_vm_tx_intr - Handle VM Tx interrupt - * - * @param[in] addr: MAC base address. - * @param[in] chan: DMA Tx channel number. + * @brief Enable/disable debug interrupt * - * Algorithm: Clear Tx interrupt source at DMA and wrapper level. + * @param[in] osi_dma: OSI DMA private data structure. * - * @note - * Dependencies: None. - * Protection: None. - * @retval None. 
+ * Algorithm: + * - if osi_dma->ioctl_data.arg_u32 == OSI_ENABLE enable debug interrupt + * - else disable bebug inerrupts */ -static void eqos_clear_vm_tx_intr(void *addr, nveu32_t chan) +static void eqos_debug_intr_config(struct osi_dma_priv_data *osi_dma) { -#if 0 - CHECK_CHAN_BOUND(chan); -#endif - osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_TX, - (nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan)); - osi_writel(EQOS_VIRT_INTR_CHX_STATUS_TX, - (nveu8_t *)addr + EQOS_VIRT_INTR_CHX_STATUS(chan)); + nveu32_t chinx; + nveu32_t chan; + nveu32_t val; + nveu32_t enable = osi_dma->ioctl_data.arg_u32; + + if (enable == OSI_ENABLE) { + for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) { + chan = osi_dma->dma_chans[chinx]; + val = osi_readl((nveu8_t *)osi_dma->base + + EQOS_DMA_CHX_INTR_ENA(chan)); + + val |= (EQOS_DMA_CHX_INTR_AIE | + EQOS_DMA_CHX_INTR_FBEE | + EQOS_DMA_CHX_INTR_RBUE | + EQOS_DMA_CHX_INTR_TBUE | + EQOS_DMA_CHX_INTR_NIE); + osi_writel(val, (nveu8_t *)osi_dma->base + + EQOS_DMA_CHX_INTR_ENA(chan)); + } - eqos_disable_chan_tx_intr(addr, chan); + } else { + for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) { + chan = osi_dma->dma_chans[chinx]; + val = osi_readl((nveu8_t *)osi_dma->base + + EQOS_DMA_CHX_INTR_ENA(chan)); + val &= (~EQOS_DMA_CHX_INTR_AIE & + ~EQOS_DMA_CHX_INTR_FBEE & + ~EQOS_DMA_CHX_INTR_RBUE & + ~EQOS_DMA_CHX_INTR_TBUE & + ~EQOS_DMA_CHX_INTR_NIE); + osi_writel(val, (nveu8_t *)osi_dma->base + + EQOS_DMA_CHX_INTR_ENA(chan)); + } + } } - -/** - * @brief eqos_clear_vm_rx_intr - Handle VM Rx interrupt - * - * @param[in] addr: MAC base address. - * @param[in] chan: DMA Rx channel number. - * - * Algorithm: Clear Rx interrupt source at DMA and wrapper level. - * - * @note - * Dependencies: None. - * Protection: None. - * - * @retval None. 
- */ -static void eqos_clear_vm_rx_intr(void *addr, nveu32_t chan) -{ -#if 0 - CHECK_CHAN_BOUND(chan); #endif - osi_writel(EQOS_DMA_CHX_STATUS_CLEAR_RX, - (nveu8_t *)addr + EQOS_DMA_CHX_STATUS(chan)); - osi_writel(EQOS_VIRT_INTR_CHX_STATUS_RX, - (nveu8_t *)addr + EQOS_VIRT_INTR_CHX_STATUS(chan)); - eqos_disable_chan_rx_intr(addr, chan); -} - -/** - * @brief eqos_get_dma_safety_config - EQOS get DMA safety configuration - */ -void *eqos_get_dma_safety_config(void) -{ - return &eqos_dma_safety_config; -} - -/** +/* * @brief eqos_init_dma_chan_ops - Initialize EQOS DMA operations. * * @param[in] ops: DMA channel operations pointer. */ void eqos_init_dma_chan_ops(struct dma_chan_ops *ops) { - ops->set_tx_ring_len = eqos_set_tx_ring_len; - ops->set_rx_ring_len = eqos_set_rx_ring_len; - ops->set_tx_ring_start_addr = eqos_set_tx_ring_start_addr; - ops->set_rx_ring_start_addr = eqos_set_rx_ring_start_addr; - ops->update_tx_tailptr = eqos_update_tx_tailptr; - ops->update_rx_tailptr = eqos_update_rx_tailptr; - ops->disable_chan_tx_intr = eqos_disable_chan_tx_intr; - ops->enable_chan_tx_intr = eqos_enable_chan_tx_intr; - ops->disable_chan_rx_intr = eqos_disable_chan_rx_intr; - ops->enable_chan_rx_intr = eqos_enable_chan_rx_intr; - ops->start_dma = eqos_start_dma; - ops->stop_dma = eqos_stop_dma; - ops->init_dma_channel = eqos_init_dma_channel; - ops->set_rx_buf_len = eqos_set_rx_buf_len; -#ifndef OSI_STRIPPED_LIB - ops->validate_regs = eqos_validate_dma_regs; ops->config_slot = eqos_config_slot; -#endif /* !OSI_STRIPPED_LIB */ - ops->clear_vm_tx_intr = eqos_clear_vm_tx_intr; - ops->clear_vm_rx_intr = eqos_clear_vm_rx_intr; +#ifdef OSI_DEBUG + ops->debug_intr_config = eqos_debug_intr_config; +#endif } +#endif /* !OSI_STRIPPED_LIB */ diff --git a/kernel/nvethernetrm/osi/dma/eqos_dma.h b/kernel/nvethernetrm/osi/dma/eqos_dma.h index 76444384fc..d76d5eeba7 100644 --- a/kernel/nvethernetrm/osi/dma/eqos_dma.h +++ b/kernel/nvethernetrm/osi/dma/eqos_dma.h @@ -1,5 +1,5 @@ /* - * 
Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -55,9 +55,6 @@ #define EQOS_DMA_CHX_TDLH(x) ((0x0080U * (x)) + 0x1110U) #define EQOS_DMA_CHX_TDLA(x) ((0x0080U * (x)) + 0x1114U) #define EQOS_DMA_CHX_TDRL(x) ((0x0080U * (x)) + 0x112CU) -#define EQOS_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U)) -#define EQOS_VIRT_INTR_CHX_STATUS(x) (0x8604U + ((x) * 8U)) -#define EQOS_VIRT_INTR_CHX_CNTRL(x) (0x8600U + ((x) * 8U)) /** @} */ /** @@ -66,8 +63,6 @@ * @brief Values defined for the DMA channel registers * @{ */ -#define EQOS_VIRT_INTR_CHX_STATUS_TX OSI_BIT(0) -#define EQOS_VIRT_INTR_CHX_STATUS_RX OSI_BIT(1) #define EQOS_DMA_CHX_STATUS_TI OSI_BIT(0) #define EQOS_DMA_CHX_STATUS_RI OSI_BIT(6) #define EQOS_DMA_CHX_STATUS_NIS OSI_BIT(15) @@ -76,21 +71,13 @@ #define EQOS_DMA_CHX_STATUS_CLEAR_RX \ (EQOS_DMA_CHX_STATUS_RI | EQOS_DMA_CHX_STATUS_NIS) -#define EQOS_VIRT_INTR_CHX_CNTRL_TX OSI_BIT(0) -#define EQOS_VIRT_INTR_CHX_CNTRL_RX OSI_BIT(1) - -#define EQOS_DMA_CHX_INTR_TIE OSI_BIT(0) +#ifdef OSI_DEBUG #define EQOS_DMA_CHX_INTR_TBUE OSI_BIT(2) -#define EQOS_DMA_CHX_INTR_RIE OSI_BIT(6) #define EQOS_DMA_CHX_INTR_RBUE OSI_BIT(7) #define EQOS_DMA_CHX_INTR_FBEE OSI_BIT(12) #define EQOS_DMA_CHX_INTR_AIE OSI_BIT(14) #define EQOS_DMA_CHX_INTR_NIE OSI_BIT(15) -#define EQOS_DMA_CHX_TX_CTRL_OSF OSI_BIT(4) -#define EQOS_DMA_CHX_TX_CTRL_TSE OSI_BIT(12) -#define EQOS_DMA_CHX_CTRL_PBLX8 OSI_BIT(16) -#define EQOS_DMA_CHX_RBSZ_MASK 0x7FFEU -#define EQOS_DMA_CHX_RBSZ_SHIFT 1U +#endif #define EQOS_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED 0x200000U #define EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED 0xC0000U #define EQOS_DMA_CHX_RX_WDT_RWT_MASK 0xFFU @@ -101,100 +88,10 @@ /* Below macros are used for periodic reg validation for functional safety. 
* HW register mask - to mask out reserved and self-clearing bits */ -#define EQOS_DMA_CHX_CTRL_MASK 0x11D3FFFU -#define EQOS_DMA_CHX_TX_CTRL_MASK 0xF3F9010U -#define EQOS_DMA_CHX_RX_CTRL_MASK 0x8F3F7FE0U -#define EQOS_DMA_CHX_TDRL_MASK 0x3FFU -#define EQOS_DMA_CHX_RDRL_MASK 0x3FFU -#define EQOS_DMA_CHX_INTR_ENA_MASK 0xFFC7U #ifndef OSI_STRIPPED_LIB #define EQOS_DMA_CHX_SLOT_SIV_MASK 0xFFFU #define EQOS_DMA_CHX_SLOT_SIV_SHIFT 4U #define EQOS_DMA_CHX_SLOT_ESC 0x1U #endif /* !OSI_STRIPPED_LIB */ -/* To add new registers to validate,append at end of below macro list and - * increment EQOS_MAX_DMA_SAFETY_REGS. - * Using macros instead of enum due to misra error. - */ -#define EQOS_DMA_CH0_CTRL_IDX 0U -#define EQOS_DMA_CH1_CTRL_IDX 1U -#define EQOS_DMA_CH2_CTRL_IDX 2U -#define EQOS_DMA_CH3_CTRL_IDX 3U -#define EQOS_DMA_CH4_CTRL_IDX 4U -#define EQOS_DMA_CH5_CTRL_IDX 5U -#define EQOS_DMA_CH6_CTRL_IDX 6U -#define EQOS_DMA_CH7_CTRL_IDX 7U -#define EQOS_DMA_CH0_TX_CTRL_IDX 8U -#define EQOS_DMA_CH1_TX_CTRL_IDX 9U -#define EQOS_DMA_CH2_TX_CTRL_IDX 10U -#define EQOS_DMA_CH3_TX_CTRL_IDX 11U -#define EQOS_DMA_CH4_TX_CTRL_IDX 12U -#define EQOS_DMA_CH5_TX_CTRL_IDX 13U -#define EQOS_DMA_CH6_TX_CTRL_IDX 14U -#define EQOS_DMA_CH7_TX_CTRL_IDX 15U -#define EQOS_DMA_CH0_RX_CTRL_IDX 16U -#define EQOS_DMA_CH1_RX_CTRL_IDX 17U -#define EQOS_DMA_CH2_RX_CTRL_IDX 18U -#define EQOS_DMA_CH3_RX_CTRL_IDX 19U -#define EQOS_DMA_CH4_RX_CTRL_IDX 20U -#define EQOS_DMA_CH5_RX_CTRL_IDX 21U -#define EQOS_DMA_CH6_RX_CTRL_IDX 22U -#define EQOS_DMA_CH7_RX_CTRL_IDX 23U -#define EQOS_DMA_CH0_TDRL_IDX 24U -#define EQOS_DMA_CH1_TDRL_IDX 25U -#define EQOS_DMA_CH2_TDRL_IDX 26U -#define EQOS_DMA_CH3_TDRL_IDX 27U -#define EQOS_DMA_CH4_TDRL_IDX 28U -#define EQOS_DMA_CH5_TDRL_IDX 29U -#define EQOS_DMA_CH6_TDRL_IDX 30U -#define EQOS_DMA_CH7_TDRL_IDX 31U -#define EQOS_DMA_CH0_RDRL_IDX 32U -#define EQOS_DMA_CH1_RDRL_IDX 33U -#define EQOS_DMA_CH2_RDRL_IDX 34U -#define EQOS_DMA_CH3_RDRL_IDX 35U -#define EQOS_DMA_CH4_RDRL_IDX 
36U -#define EQOS_DMA_CH5_RDRL_IDX 37U -#define EQOS_DMA_CH6_RDRL_IDX 38U -#define EQOS_DMA_CH7_RDRL_IDX 39U -#define EQOS_DMA_CH0_INTR_ENA_IDX 40U -#define EQOS_DMA_CH1_INTR_ENA_IDX 41U -#define EQOS_DMA_CH2_INTR_ENA_IDX 42U -#define EQOS_DMA_CH3_INTR_ENA_IDX 43U -#define EQOS_DMA_CH4_INTR_ENA_IDX 44U -#define EQOS_DMA_CH5_INTR_ENA_IDX 45U -#define EQOS_DMA_CH6_INTR_ENA_IDX 46U -#define EQOS_DMA_CH7_INTR_ENA_IDX 47U -#define EQOS_MAX_DMA_SAFETY_REGS 48U -#define EQOS_AXI_BUS_WIDTH 0x10U /** @} */ - -/** - * @brief dma_func_safety - Struct used to store last written values of - * critical DMA HW registers. - */ -struct dma_func_safety { - /** Array of reg MMIO addresses (base EQoS + offset of reg) */ - void *reg_addr[EQOS_MAX_DMA_SAFETY_REGS]; - /** Array of bit-mask value of each corresponding reg - * (used to ignore self-clearing/reserved bits in reg) */ - nveu32_t reg_mask[EQOS_MAX_DMA_SAFETY_REGS]; - /** Array of value stored in each corresponding register */ - nveu32_t reg_val[EQOS_MAX_DMA_SAFETY_REGS]; - /** OSI lock variable used to protect writes to reg - * while validation is in-progress */ - nveu32_t dma_safety_lock; -}; - -/** - * @brief eqos_get_dma_safety_config - EQOS get DMA safety configuration - * - * @note - * API Group: - * - Initialization: Yes - * - Run time: No - * - De-initialization: No - * - * @returns Pointer to DMA safety configuration - */ -void *eqos_get_dma_safety_config(void); #endif /* INCLUDED_EQOS_DMA_H */ diff --git a/kernel/nvethernetrm/osi/dma/hw_common.h b/kernel/nvethernetrm/osi/dma/hw_common.h index a7b6335357..d1b6a287cb 100644 --- a/kernel/nvethernetrm/osi/dma/hw_common.h +++ b/kernel/nvethernetrm/osi/dma/hw_common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -30,7 +30,17 @@ * @{ */ #define HW_GLOBAL_DMA_STATUS 0x8700U +#define VIRT_INTR_CHX_CNTRL(x) (0x8600U + ((x) * 8U)) +#define VIRT_INTR_CHX_STATUS(x) (0x8604U + ((x) * 8U)) +#define AXI_BUS_WIDTH 0x10U +#define DMA_CHX_INTR_TIE OSI_BIT(0) +#define DMA_CHX_INTR_RIE OSI_BIT(6) +#define DMA_CHX_CTRL_PBLX8 OSI_BIT(16) +#define DMA_CHX_TX_CTRL_OSP OSI_BIT(4) +#define DMA_CHX_TX_CTRL_TSE OSI_BIT(12) +#define DMA_CHX_RBSZ_MASK 0x7FFEU +#define DMA_CHX_RBSZ_SHIFT 1U +#define DMA_CHX_RX_WDT_RWT_MASK 0xFFU /** @} */ #endif /* INCLUDED_HW_COMMON_H */ - diff --git a/kernel/nvethernetrm/osi/dma/hw_desc.h b/kernel/nvethernetrm/osi/dma/hw_desc.h index 45cf8966d4..392531f7c2 100644 --- a/kernel/nvethernetrm/osi/dma/hw_desc.h +++ b/kernel/nvethernetrm/osi/dma/hw_desc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -45,22 +45,26 @@ #define RDES3_ERR_RE OSI_BIT(20) #define RDES3_ERR_DRIB OSI_BIT(19) #define RDES3_PKT_LEN 0x00007fffU -#define RDES3_LT (OSI_BIT(16) | OSI_BIT(17) | OSI_BIT(18)) -#define RDES3_LT_VT OSI_BIT(18) -#define RDES3_LT_DVT (OSI_BIT(16) | OSI_BIT(18)) -#define RDES3_RS0V OSI_BIT(25) #define RDES3_RS1V OSI_BIT(26) -#define RDES3_RSV OSI_BIT(26) -#define RDES0_OVT 0x0000FFFFU #define RDES3_TSD OSI_BIT(6) #define RDES3_TSA OSI_BIT(4) #define RDES1_TSA OSI_BIT(14) #define RDES1_TD OSI_BIT(15) +#ifndef OSI_STRIPPED_LIB +#define RDES3_LT (OSI_BIT(16) | OSI_BIT(17) | OSI_BIT(18)) +#define RDES3_LT_VT OSI_BIT(18) +#define RDES3_LT_DVT (OSI_BIT(16) | OSI_BIT(18)) +#define RDES0_OVT 0x0000FFFFU +#define RDES3_RS0V OSI_BIT(25) +#define RDES3_RSV OSI_BIT(26) #define RDES3_L34T 0x00F00000U #define RDES3_L34T_IPV4_TCP OSI_BIT(20) #define RDES3_L34T_IPV4_UDP OSI_BIT(21) #define RDES3_L34T_IPV6_TCP (OSI_BIT(23) | OSI_BIT(20)) #define RDES3_L34T_IPV6_UDP (OSI_BIT(23) | OSI_BIT(21)) +#define RDES3_ELLT_CVLAN 0x90000U +#define RDES3_ERR_MGBE_CRC (OSI_BIT(16) | OSI_BIT(17)) +#endif /* !OSI_STRIPPED_LIB */ #define RDES1_IPCE OSI_BIT(7) #define RDES1_IPCB OSI_BIT(6) @@ -73,7 +77,6 @@ #define RDES3_ELLT 0xF0000U #define RDES3_ELLT_IPHE 0x50000U #define RDES3_ELLT_CSUM_ERR 0x60000U -#define RDES3_ELLT_CVLAN 0x90000U /** @} */ /** Error Summary bits for Received packet */ @@ -83,7 +86,6 @@ /** MGBE error summary bits for Received packet */ #define RDES3_ES_MGBE 0x8000U -#define RDES3_ERR_MGBE_CRC (OSI_BIT(16) | OSI_BIT(17)) /** * @addtogroup EQOS_TxDesc Transmit Descriptors bit fields * diff --git a/kernel/nvethernetrm/osi/dma/libnvethernetcl.export b/kernel/nvethernetrm/osi/dma/libnvethernetcl.export index 311e3bc36c..2113803006 100644 --- a/kernel/nvethernetrm/osi/dma/libnvethernetcl.export +++ 
b/kernel/nvethernetrm/osi/dma/libnvethernetcl.export @@ -1,6 +1,6 @@ ################################### tell Emacs this is a -*- makefile-gmake -*- # -# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), @@ -23,8 +23,6 @@ # libnvethernetcl interface export # ############################################################################### -osi_start_dma -osi_stop_dma osi_get_refill_rx_desc_cnt osi_rx_dma_desc_init osi_set_rx_buf_len diff --git a/kernel/nvethernetrm/osi/core/libnvethernetrm.export b/kernel/nvethernetrm/osi/dma/libnvethernetcl_safety.export similarity index 66% rename from kernel/nvethernetrm/osi/core/libnvethernetrm.export rename to kernel/nvethernetrm/osi/dma/libnvethernetcl_safety.export index d27755aa8e..aa2eb44871 100644 --- a/kernel/nvethernetrm/osi/core/libnvethernetrm.export +++ b/kernel/nvethernetrm/osi/dma/libnvethernetcl_safety.export @@ -1,6 +1,6 @@ ################################### tell Emacs this is a -*- makefile-gmake -*- # -# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), @@ -20,30 +20,20 @@ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
# -# libnvethernetrm interface export +# libnvethernetcl safety interface export # ############################################################################### -osi_init_core_ops -osi_write_phy_reg -osi_read_phy_reg -osi_hw_core_init -osi_hw_core_deinit -osi_get_core -osi_handle_ioctl -#Below need to be enabled when MACSEC is enabled -#osi_macsec_en -#osi_macsec_deinit -#osi_macsec_ns_isr -#osi_macsec_s_isr -#osi_macsec_init -#osi_macsec_cipher_config -#osi_macsec_config -#osi_init_macsec_ops -#osi_macsec_config_lut -#osi_macsec_loopback -#osi_macsec_read_mmc -#osi_macsec_config_dbg_buf -#osi_macsec_dbg_events_config -#osi_macsec_config_kt -#osi_macsec_get_sc_lut_key_index -#osi_macsec_update_mtu +osi_get_refill_rx_desc_cnt +osi_rx_dma_desc_init +osi_set_rx_buf_len +osi_hw_transmit +osi_process_tx_completions +osi_process_rx_completions +osi_hw_dma_init +osi_hw_dma_deinit +osi_init_dma_ops +osi_dma_get_systime_from_mac +osi_is_mac_enabled +osi_get_dma +osi_handle_dma_intr +osi_get_global_dma_status diff --git a/kernel/nvethernetrm/osi/dma/mgbe_desc.c b/kernel/nvethernetrm/osi/dma/mgbe_desc.c index ef12db5ac6..deac61941c 100644 --- a/kernel/nvethernetrm/osi/dma/mgbe_desc.c +++ b/kernel/nvethernetrm/osi/dma/mgbe_desc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,6 +24,7 @@ #include "hw_desc.h" #include "mgbe_desc.h" +#ifndef OSI_STRIPPED_LIB /** * @brief mgbe_get_rx_vlan - Get Rx VLAN from descriptor * @@ -94,34 +95,6 @@ static inline void mgbe_update_rx_err_stats(struct osi_rx_desc *rx_desc, } } -/** - * @brief mgbe_get_rx_csum - Get the Rx checksum from descriptor if valid - * - * Algorithm: - * 1) Check if the descriptor has any checksum validation errors. 
- * 2) If none, set a per packet context flag indicating no err in - * Rx checksum - * 3) The OSD layer will mark the packet appropriately to skip - * IP/TCP/UDP checksum validation in software based on whether - * COE is enabled for the device. - * - * @param[in] rx_desc: Rx descriptor - * @param[in] rx_pkt_cx: Per-Rx packet context structure - */ -static void mgbe_get_rx_csum(struct osi_rx_desc *rx_desc, - struct osi_rx_pkt_cx *rx_pkt_cx) -{ - unsigned int ellt = rx_desc->rdes3 & RDES3_ELLT; - - /* Always include either checksum none/unnecessary - * depending on status fields in desc. - * Hence no need to explicitly add OSI_PKT_CX_CSUM flag. - */ - if ((ellt != RDES3_ELLT_IPHE) && (ellt != RDES3_ELLT_CSUM_ERR)) { - rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY; - } -} - /** * @brief mgbe_get_rx_hash - Get Rx packet hash from descriptor if valid * @@ -157,8 +130,60 @@ static void mgbe_get_rx_hash(struct osi_rx_desc *rx_desc, rx_pkt_cx->rx_hash = rx_desc->rdes1; rx_pkt_cx->flags |= OSI_PKT_CX_RSS; } +#endif /* !OSI_STRIPPED_LIB */ + +/** + * @brief mgbe_get_rx_csum - Get the Rx checksum from descriptor if valid + * + * Algorithm: + * 1) Check if the descriptor has any checksum validation errors. + * 2) If none, set a per packet context flag indicating no err in + * Rx checksum + * 3) The OSD layer will mark the packet appropriately to skip + * IP/TCP/UDP checksum validation in software based on whether + * COE is enabled for the device. + * + * @param[in] rx_desc: Rx descriptor + * @param[in] rx_pkt_cx: Per-Rx packet context structure + */ +static void mgbe_get_rx_csum(const struct osi_rx_desc *const rx_desc, + struct osi_rx_pkt_cx *rx_pkt_cx) +{ + nveu32_t ellt = rx_desc->rdes3 & RDES3_ELLT; + nveu32_t pkt_type; + + /* Always include either checksum none/unnecessary + * depending on status fields in desc. + * Hence no need to explicitly add OSI_PKT_CX_CSUM flag. 
+ */ + if ((ellt != RDES3_ELLT_IPHE) && (ellt != RDES3_ELLT_CSUM_ERR)) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UNNECESSARY; + } + + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4; + if (ellt == RDES3_ELLT_IPHE) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_IPv4_BAD; + } + + pkt_type = rx_desc->rdes3 & MGBE_RDES3_PT_MASK; + if (pkt_type == MGBE_RDES3_PT_IPV4_TCP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv4; + } else if (pkt_type == MGBE_RDES3_PT_IPV4_UDP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv4; + } else if (pkt_type == MGBE_RDES3_PT_IPV6_TCP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCPv6; + } else if (pkt_type == MGBE_RDES3_PT_IPV6_UDP) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_UDPv6; + } else { + /* Do nothing */ + } + + if (ellt == RDES3_ELLT_CSUM_ERR) { + rx_pkt_cx->rxcsum |= OSI_CHECKSUM_TCP_UDP_BAD; + } +} -/** +/** * @brief mgbe_get_rx_hwstamp - Get Rx HW Time stamp * * Algorithm: @@ -174,15 +199,17 @@ static void mgbe_get_rx_hash(struct osi_rx_desc *rx_desc, * @retval -1 if TimeStamp is not available * @retval 0 if TimeStamp is available. 
*/ -static int mgbe_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, - struct osi_rx_desc *rx_desc, - struct osi_rx_desc *context_desc, - struct osi_rx_pkt_cx *rx_pkt_cx) +static nve32_t mgbe_get_rx_hwstamp(const struct osi_dma_priv_data *const osi_dma, + const struct osi_rx_desc *const rx_desc, + const struct osi_rx_desc *const context_desc, + struct osi_rx_pkt_cx *rx_pkt_cx) { - int retry; + nve32_t ret = 0; + nve32_t retry; if ((rx_desc->rdes3 & RDES3_CDA) != RDES3_CDA) { - return -1; + ret = -1; + goto fail; } for (retry = 0; retry < 10; retry++) { @@ -193,7 +220,8 @@ static int mgbe_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, if ((context_desc->rdes0 == OSI_INVALID_VALUE) && (context_desc->rdes1 == OSI_INVALID_VALUE)) { /* Invalid time stamp */ - return -1; + ret = -1; + goto fail; } /* Update rx pkt context flags to indicate PTP */ rx_pkt_cx->flags |= OSI_PKT_CX_PTP; @@ -207,24 +235,27 @@ static int mgbe_get_rx_hwstamp(struct osi_dma_priv_data *osi_dma, if (retry == 10) { /* Timed out waiting for Rx timestamp */ - return -1; + ret = -1; + goto fail; } rx_pkt_cx->ns = context_desc->rdes0 + (OSI_NSEC_PER_SEC * context_desc->rdes1); if (rx_pkt_cx->ns < context_desc->rdes0) { - /* Will not hit this case */ - return -1; + ret = -1; } - return 0; +fail: + return ret; } -void mgbe_init_desc_ops(struct desc_ops *d_ops) +void mgbe_init_desc_ops(struct desc_ops *p_dops) { - d_ops->get_rx_csum = mgbe_get_rx_csum; - d_ops->update_rx_err_stats = mgbe_update_rx_err_stats; - d_ops->get_rx_vlan = mgbe_get_rx_vlan; - d_ops->get_rx_hash = mgbe_get_rx_hash; - d_ops->get_rx_hwstamp = mgbe_get_rx_hwstamp; +#ifndef OSI_STRIPPED_LIB + p_dops->update_rx_err_stats = mgbe_update_rx_err_stats; + p_dops->get_rx_vlan = mgbe_get_rx_vlan; + p_dops->get_rx_hash = mgbe_get_rx_hash; +#endif /* !OSI_STRIPPED_LIB */ + p_dops->get_rx_csum = mgbe_get_rx_csum; + p_dops->get_rx_hwstamp = mgbe_get_rx_hwstamp; } diff --git a/kernel/nvethernetrm/osi/dma/mgbe_desc.h 
b/kernel/nvethernetrm/osi/dma/mgbe_desc.h index 8b0d5d0ede..1eb4824081 100644 --- a/kernel/nvethernetrm/osi/dma/mgbe_desc.h +++ b/kernel/nvethernetrm/osi/dma/mgbe_desc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,6 +23,7 @@ #ifndef MGBE_DESC_H_ #define MGBE_DESC_H_ +#ifndef OSI_STRIPPED_LIB /** * @addtogroup MGBE MAC FRP Stats. * @@ -32,6 +33,20 @@ #define MGBE_RDES2_FRPSM OSI_BIT(10) #define MGBE_RDES3_FRPSL OSI_BIT(14) /** @} */ +#endif /* !OSI_STRIPPED_LIB */ -#endif /* MGBE_DESC_H_ */ +/** + * @addtogroup MGBE RDESC bits. + * + * @brief Values defined for the MGBE rx descriptor bit fields + * @{ + */ + +#define MGBE_RDES3_PT_MASK (OSI_BIT(20) | OSI_BIT(21) | OSI_BIT(22) | OSI_BIT(23)) +#define MGBE_RDES3_PT_IPV4_TCP OSI_BIT(20) +#define MGBE_RDES3_PT_IPV4_UDP OSI_BIT(21) +#define MGBE_RDES3_PT_IPV6_TCP (OSI_BIT(20) | OSI_BIT(23)) +#define MGBE_RDES3_PT_IPV6_UDP (OSI_BIT(21) | OSI_BIT(23)) +/** @} */ +#endif /* MGBE_DESC_H_ */ diff --git a/kernel/nvethernetrm/osi/dma/mgbe_dma.c b/kernel/nvethernetrm/osi/dma/mgbe_dma.c index eab9f3b1a8..997d49e9b5 100644 --- a/kernel/nvethernetrm/osi/dma/mgbe_dma.c +++ b/kernel/nvethernetrm/osi/dma/mgbe_dma.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,664 +20,12 @@ * DEALINGS IN THE SOFTWARE. */ +#ifndef OSI_STRIPPED_LIB #include "../osi/common/common.h" #include #include "mgbe_dma.h" #include "dma_local.h" -/** - * @brief mgbe_disable_chan_tx_intr - Disables DMA Tx channel interrupts. 
- * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - * 3) Mapping of physical IRQ line to DMA channel need to be maintained at - * OSDependent layer and pass corresponding channel number. - */ -static void mgbe_disable_chan_tx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - cntrl = osi_readl((nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); - cntrl &= ~MGBE_VIRT_INTR_CHX_CNTRL_TX; - osi_writel(cntrl, (nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief mgbe_enable_chan_tx_intr - Enable Tx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - * 3) Mapping of physical IRQ line to DMA channel need to be maintained at - * OSDependent layer and pass corresponding channel number. - */ -static void mgbe_enable_chan_tx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - cntrl = osi_readl((nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); - cntrl |= MGBE_VIRT_INTR_CHX_CNTRL_TX; - osi_writel(cntrl, (nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief mgbe_disable_chan_rx_intr - Disable Rx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Rx channel number. 
- * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - * 3) Mapping of physical IRQ line to DMA channel need to be maintained at - * OSDependent layer and pass corresponding channel number. - */ -static void mgbe_disable_chan_rx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - cntrl = osi_readl((nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); - cntrl &= ~MGBE_VIRT_INTR_CHX_CNTRL_RX; - osi_writel(cntrl, (nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief mgbe_enable_chan_rx_intr - Enable Rx channel interrupts. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Rx channel number. - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - */ -static void mgbe_enable_chan_rx_intr(void *addr, nveu32_t chan) -{ - nveu32_t cntrl; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - cntrl = osi_readl((nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); - cntrl |= MGBE_VIRT_INTR_CHX_CNTRL_RX; - osi_writel(cntrl, (nveu8_t *)addr + - MGBE_VIRT_INTR_CHX_CNTRL(chan)); -} - -/** - * @brief mgbe_set_tx_ring_len - Set DMA Tx ring length. - * - * Algorithm: Set DMA Tx channel ring length for specific channel. - * - * @param[in] osi_dma: OSI DMA data structure. - * @param[in] chan: DMA Tx channel number. - * @param[in] len: Length. 
- */ -static void mgbe_set_tx_ring_len(struct osi_dma_priv_data *osi_dma, - nveu32_t chan, - nveu32_t len) -{ - void *addr = osi_dma->base; - nveu32_t value; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - value = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_TX_CNTRL2(chan)); - value |= (len & MGBE_DMA_RING_LENGTH_MASK); - osi_writel(value, (nveu8_t *)addr + MGBE_DMA_CHX_TX_CNTRL2(chan)); -} - -/** - * @brief mgbe_set_tx_ring_start_addr - Set DMA Tx ring base address. - * - * Algorithm: Sets DMA Tx ring base address for specific channel. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. - * @param[in] tx_desc: Tx desc base addess. - */ -static void mgbe_set_tx_ring_start_addr(void *addr, nveu32_t chan, - nveu64_t tx_desc) -{ - nveu64_t temp; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - temp = H32(tx_desc); - if (temp < UINT_MAX) { - osi_writel((nveu32_t)temp, (nveu8_t *)addr + - MGBE_DMA_CHX_TDLH(chan)); - } - - temp = L32(tx_desc); - if (temp < UINT_MAX) { - osi_writel((nveu32_t)temp, (nveu8_t *)addr + - MGBE_DMA_CHX_TDLA(chan)); - } -} - -/** - * @brief mgbe_update_tx_tailptr - Updates DMA Tx ring tail pointer. - * - * Algorithm: Updates DMA Tx ring tail pointer for specific channel. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Tx channel number. - * @param[in] tailptr: DMA Tx ring tail pointer. 
- * - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - */ -static void mgbe_update_tx_tailptr(void *addr, nveu32_t chan, - nveu64_t tailptr) -{ - nveu64_t temp; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - temp = L32(tailptr); - if (temp < UINT_MAX) { - osi_writel((nveu32_t)temp, (nveu8_t *)addr + - MGBE_DMA_CHX_TDTLP(chan)); - } -} - -/** - * @brief mgbe_set_rx_ring_len - Set Rx channel ring length. - * - * Algorithm: Sets DMA Rx channel ring length for specific DMA channel. - * - * @param[in] osi_dma: OSI DMA data structure. - * @param[in] chan: DMA Rx channel number. - * @param[in] len: Length - */ -static void mgbe_set_rx_ring_len(struct osi_dma_priv_data *osi_dma, - nveu32_t chan, - nveu32_t len) -{ - void *addr = osi_dma->base; - nveu32_t value; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - value = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_RX_CNTRL2(chan)); - value |= (len & MGBE_DMA_RING_LENGTH_MASK); - osi_writel(value, (nveu8_t *)addr + MGBE_DMA_CHX_RX_CNTRL2(chan)); -} - -/** - * @brief mgbe_set_rx_ring_start_addr - Set DMA Rx ring base address. - * - * Algorithm: Sets DMA Rx channel ring base address. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Rx channel number. - * @param[in] tx_desc: DMA Rx desc base address. 
- */ -static void mgbe_set_rx_ring_start_addr(void *addr, nveu32_t chan, - nveu64_t tx_desc) -{ - nveu64_t temp; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - temp = H32(tx_desc); - if (temp < UINT_MAX) { - osi_writel((nveu32_t)temp, (nveu8_t *)addr + - MGBE_DMA_CHX_RDLH(chan)); - } - - temp = L32(tx_desc); - if (temp < UINT_MAX) { - osi_writel((nveu32_t)temp, (nveu8_t *)addr + - MGBE_DMA_CHX_RDLA(chan)); - } -} - -/** - * @brief mgbe_update_rx_tailptr - Update Rx ring tail pointer - * - * Algorithm: Updates DMA Rx channel tail pointer for specific channel. - * - * @param[in] addr: Base address indicating the start of - * memory mapped IO region of the MAC. - * @param[in] chan: DMA Rx channel number. - * @param[in] tailptr: Tail pointer - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - */ -static void mgbe_update_rx_tailptr(void *addr, nveu32_t chan, - nveu64_t tailptr) -{ - nveu64_t temp; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - temp = H32(tailptr); - if (temp < UINT_MAX) { - osi_writel((nveu32_t)temp, (nveu8_t *)addr + - MGBE_DMA_CHX_RDTHP(chan)); - } - - temp = L32(tailptr); - if (temp < UINT_MAX) { - osi_writel((nveu32_t)temp, (nveu8_t *)addr + - MGBE_DMA_CHX_RDTLP(chan)); - } -} - -/** - * @brief mgbe_start_dma - Start DMA. - * - * Algorithm: Start Tx and Rx DMA for specific channel. - * - * @param[in] osi_dma: OSI DMA private data structure. - * @param[in] chan: DMA Tx/Rx channel number. 
- * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - */ -static void mgbe_start_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan) -{ - nveu32_t val; - void *addr = osi_dma->base; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - /* start Tx DMA */ - val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan)); - val |= OSI_BIT(0); - osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan)); - - /* start Rx DMA */ - val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan)); - val |= OSI_BIT(0); - val &= ~OSI_BIT(31); - osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan)); -} - -/** - * @brief mgbe_stop_dma - Stop DMA. - * - * Algorithm: Start Tx and Rx DMA for specific channel. - * - * @param[in] osi_dma: OSI DMA private data structure. - * @param[in] chan: DMA Tx/Rx channel number. - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - */ -static void mgbe_stop_dma(struct osi_dma_priv_data *osi_dma, nveu32_t chan) -{ - nveu32_t val; - void *addr = osi_dma->base; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - /* stop Tx DMA */ - val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan)); - val &= ~OSI_BIT(0); - osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_TX_CTRL(chan)); - - /* stop Rx DMA */ - val = osi_readl((nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan)); - val &= ~OSI_BIT(0); - val |= OSI_BIT(31); - osi_writel(val, (nveu8_t *)addr + MGBE_DMA_CHX_RX_CTRL(chan)); -} - -/** - * @brief mgbe_configure_dma_channel - Configure DMA channel - * - * Algorithm: This takes care of configuring the below - * parameters for the DMA channel - * 1) Enabling DMA channel interrupts - * 2) Enable 8xPBL mode - * 3) Program Tx, Rx PBL - * 4) Enable TSO if HW supports - * 5) Program Rx Watchdog timer - * 6) Program Out Standing DMA Read Requests - * 
7) Program Out Standing DMA write Requests - * - * @param[in] chan: DMA channel number that need to be configured. - * @param[in] owrq: out standing write dma requests - * @param[in] orrq: out standing read dma requests - * @param[in] osi_dma: OSI DMA private data structure. - * - * @note MAC has to be out of reset. - */ -static void mgbe_configure_dma_channel(nveu32_t chan, - nveu32_t owrq, - nveu32_t orrq, - struct osi_dma_priv_data *osi_dma) -{ - nveu32_t value; - nveu32_t txpbl; - nveu32_t rxpbl; -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - /* enable DMA channel interrupts */ - /* Enable TIE and TBUE */ - /* TIE - Transmit Interrupt Enable */ - /* TBUE - Transmit Buffer Unavailable Enable */ - /* RIE - Receive Interrupt Enable */ - /* RBUE - Receive Buffer Unavailable Enable */ - /* AIE - Abnormal Interrupt Summary Enable */ - /* NIE - Normal Interrupt Summary Enable */ - /* FBE - Fatal Bus Error Enable */ - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_INTR_ENA(chan)); - value |= MGBE_DMA_CHX_INTR_TIE | MGBE_DMA_CHX_INTR_TBUE | - MGBE_DMA_CHX_INTR_RIE | MGBE_DMA_CHX_INTR_RBUE | - MGBE_DMA_CHX_INTR_FBEE | MGBE_DMA_CHX_INTR_AIE | - MGBE_DMA_CHX_INTR_NIE; - - /* For multi-irqs to work nie needs to be disabled */ - /* TODO: do we need this ? 
*/ - value &= ~(MGBE_DMA_CHX_INTR_NIE); - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_INTR_ENA(chan)); - - /* Enable 8xPBL mode */ - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_CTRL(chan)); - value |= MGBE_DMA_CHX_CTRL_PBLX8; - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_CTRL(chan)); - - /* Configure DMA channel Transmit control register */ - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_TX_CTRL(chan)); - /* Enable OSF mode */ - value |= MGBE_DMA_CHX_TX_CTRL_OSP; - - /* - * Formula for TxPBL calculation is - * (TxPBL) < ((TXQSize - MTU)/(DATAWIDTH/8)) - 5 - * if TxPBL exceeds the value of 256 then we need to make use of 256 - * as the TxPBL else we should be using the value whcih we get after - * calculation by using above formula - */ - if (osi_dma->pre_si == OSI_ENABLE) { - txpbl = ((((MGBE_TXQ_RXQ_SIZE_FPGA / osi_dma->num_dma_chans) - - osi_dma->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U); - } else { - txpbl = ((((MGBE_TXQ_SIZE / osi_dma->num_dma_chans) - - osi_dma->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U); - } - - /* Since PBLx8 is set, so txpbl/8 will be the value that - * need to be programmed - */ - if (txpbl >= MGBE_DMA_CHX_MAX_PBL) { - value |= ((MGBE_DMA_CHX_MAX_PBL / 8U) << - MGBE_DMA_CHX_CTRL_PBL_SHIFT); - } else { - value |= ((txpbl / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT); - } - - /* enable TSO by default if HW supports */ - value |= MGBE_DMA_CHX_TX_CTRL_TSE; - - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_TX_CTRL(chan)); - - /* Configure DMA channel Receive control register */ - /* Select Rx Buffer size. 
Needs to be rounded up to next multiple of - * bus width - */ - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_CTRL(chan)); - - /* clear previous Rx buffer size */ - value &= ~MGBE_DMA_CHX_RBSZ_MASK; - value |= (osi_dma->rx_buf_len << MGBE_DMA_CHX_RBSZ_SHIFT); - /* RxPBL calculation is - * RxPBL <= Rx Queue Size/2 - */ - if (osi_dma->pre_si == OSI_ENABLE) { - rxpbl = (((MGBE_TXQ_RXQ_SIZE_FPGA / osi_dma->num_dma_chans) / - 2U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT); - } else { - rxpbl = (((MGBE_RXQ_SIZE / osi_dma->num_dma_chans) / 2U) << - MGBE_DMA_CHX_CTRL_PBL_SHIFT); - } - /* Since PBLx8 is set, so rxpbl/8 will be the value that - * need to be programmed - */ - if (rxpbl >= MGBE_DMA_CHX_MAX_PBL) { - value |= ((MGBE_DMA_CHX_MAX_PBL / 8) << - MGBE_DMA_CHX_CTRL_PBL_SHIFT); - } else { - value |= ((rxpbl / 8) << MGBE_DMA_CHX_CTRL_PBL_SHIFT); - } - - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_CTRL(chan)); - - /* Set Receive Interrupt Watchdog Timer Count */ - /* conversion of usec to RWIT value - * Eg:System clock is 62.5MHz, each clock cycle would then be 16ns - * For value 0x1 in watchdog timer,device would wait for 256 clk cycles, - * ie, (16ns x 256) => 4.096us (rounding off to 4us) - * So formula with above values is,ret = usec/4 - */ - /* NOTE: Bug 3287883: If RWTU value programmed then driver needs - * to follow below order - - * 1. First write RWT field with non-zero value. - * 2. Program RWTU field of register - * DMA_CH(#i)_Rx_Interrupt_Watchdog_Time. 
- */ - if ((osi_dma->use_riwt == OSI_ENABLE) && - (osi_dma->rx_riwt < UINT_MAX)) { - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_WDT(chan)); - /* Mask the RWT value */ - value &= ~MGBE_DMA_CHX_RX_WDT_RWT_MASK; - /* Conversion of usec to Rx Interrupt Watchdog Timer Count */ - /* TODO: Need to fix AXI clock for silicon */ - value |= ((osi_dma->rx_riwt * - ((nveu32_t)MGBE_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) / - MGBE_DMA_CHX_RX_WDT_RWTU) & - MGBE_DMA_CHX_RX_WDT_RWT_MASK; - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_WDT(chan)); - - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_WDT(chan)); - value &= ~(MGBE_DMA_CHX_RX_WDT_RWTU_MASK << - MGBE_DMA_CHX_RX_WDT_RWTU_SHIFT); - value |= (MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE << - MGBE_DMA_CHX_RX_WDT_RWTU_SHIFT); - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_WDT(chan)); - } - - /* Update ORRQ in DMA_CH(#i)_Tx_Control2 register */ - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_TX_CNTRL2(chan)); - value |= (orrq << MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT); - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_TX_CNTRL2(chan)); - - /* Update OWRQ in DMA_CH(#i)_Rx_Control2 register */ - value = osi_readl((nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_CNTRL2(chan)); - value |= (owrq << MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT); - osi_writel(value, (nveu8_t *)osi_dma->base + - MGBE_DMA_CHX_RX_CNTRL2(chan)); -} - -/** - * @brief mgbe_init_dma_channel - DMA channel INIT - * - * @param[in] osi_dma: OSI DMA private data structure. - */ -static nve32_t mgbe_init_dma_channel(struct osi_dma_priv_data *osi_dma) -{ - nveu32_t chinx; - nveu32_t owrq; - nveu32_t orrq; - - /* DMA Read Out Standing Requests */ - /* For Presi ORRQ is 16 in case of schannel and 64 in case of mchannel. 
- * For Si ORRQ is 64 in case of single and multi channel - */ - orrq = (MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED / - osi_dma->num_dma_chans); - if ((osi_dma->num_dma_chans == 1U) && (osi_dma->pre_si == OSI_ENABLE)) { - /* For Presi ORRQ is 16 in a single channel configuration - * so overwrite only for this configuration - */ - orrq = MGBE_DMA_CHX_RX_CNTRL2_ORRQ_SCHAN_PRESI; - } - - /* DMA Write Out Standing Requests */ - /* For Presi OWRQ is 8 and for Si it is 32 in case of single channel. - * For Multi Channel OWRQ is 64 for both si and presi - */ - if (osi_dma->num_dma_chans == 1U) { - if (osi_dma->pre_si == OSI_ENABLE) { - owrq = MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN_PRESI; - } else { - owrq = MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN; - } - } else { - owrq = (MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN / - osi_dma->num_dma_chans); - } - - /* configure MGBE DMA channels */ - for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) { - mgbe_configure_dma_channel(osi_dma->dma_chans[chinx], - owrq, orrq, osi_dma); - } - - return 0; -} - -/** - * @brief mgbe_set_rx_buf_len - Set Rx buffer length - * Sets the Rx buffer length based on the new MTU size set. - * - * @param[in] osi_dma: OSI DMA private data structure. - * - * @note 1) MAC needs to be out of reset and proper clocks need to be configured - * 2) DMA HW init need to be completed successfully, see osi_hw_dma_init - * 3) osi_dma->mtu need to be filled with current MTU size <= 9K - */ -static void mgbe_set_rx_buf_len(struct osi_dma_priv_data *osi_dma) -{ - nveu32_t rx_buf_len; - - /* Add Ethernet header + FCS + NET IP align size to MTU */ - rx_buf_len = osi_dma->mtu + OSI_ETH_HLEN + - NV_VLAN_HLEN + OSI_NET_IP_ALIGN; - /* Buffer alignment */ - osi_dma->rx_buf_len = ((rx_buf_len + (MGBE_AXI_BUS_WIDTH - 1U)) & - ~(MGBE_AXI_BUS_WIDTH - 1U)); -} - -/** - * @brief Read-validate HW registers for functional safety. 
- * - * @note - * Algorithm: - * - Reads pre-configured list of MAC/MTL configuration registers - * and compares with last written value for any modifications. - * - * @param[in] osi_dma: OSI DMA private data structure. - * - * @pre - * - MAC has to be out of reset. - * - osi_hw_dma_init has to be called. Internally this would initialize - * the safety_config (see osi_dma_priv_data) based on MAC version and - * which specific registers needs to be validated periodically. - * - Invoke this call if (osi_dma_priv_data->safety_config != OSI_NULL) - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @retval 0 on success - * @retval -1 on failure. - */ -static nve32_t mgbe_validate_dma_regs(OSI_UNUSED - struct osi_dma_priv_data *osi_dma) -{ - /* TODO: for mgbe */ - return 0; -} - -/** - * @brief mgbe_clear_vm_tx_intr - Clear VM Tx interrupt - * - * Algorithm: Clear Tx interrupt source at DMA and wrapper level. - * - * @param[in] addr: MAC base address. - * @param[in] chan: DMA Tx channel number. - */ -static void mgbe_clear_vm_tx_intr(void *addr, nveu32_t chan) -{ -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - osi_writel(MGBE_DMA_CHX_STATUS_CLEAR_TX, - (nveu8_t *)addr + MGBE_DMA_CHX_STATUS(chan)); - osi_writel(MGBE_VIRT_INTR_CHX_STATUS_TX, - (nveu8_t *)addr + MGBE_VIRT_INTR_CHX_STATUS(chan)); - - mgbe_disable_chan_tx_intr(addr, chan); -} - -/** - * @brief mgbe_clear_vm_rx_intr - Clear VM Rx interrupt - * - * @param[in] addr: MAC base address. - * @param[in] chan: DMA Tx channel number. - * - * Algorithm: Clear Rx interrupt source at DMA and wrapper level. 
- */ -static void mgbe_clear_vm_rx_intr(void *addr, nveu32_t chan) -{ -#if 0 - MGBE_CHECK_CHAN_BOUND(chan); -#endif - osi_writel(MGBE_DMA_CHX_STATUS_CLEAR_RX, - (nveu8_t *)addr + MGBE_DMA_CHX_STATUS(chan)); - osi_writel(MGBE_VIRT_INTR_CHX_STATUS_RX, - (nveu8_t *)addr + MGBE_VIRT_INTR_CHX_STATUS(chan)); - - mgbe_disable_chan_rx_intr(addr, chan); -} - /** * @brief mgbe_config_slot - Configure slot Checking for DMA channel * @@ -720,24 +68,60 @@ static void mgbe_config_slot(struct osi_dma_priv_data *osi_dma, } } +#ifdef OSI_DEBUG +/** + * @brief Enable/disable debug interrupt + * + * @param[in] osi_dma: OSI DMA private data structure. + * + * Algorithm: + * - if osi_dma->ioctl_data.arg_u32 == OSI_ENABLE enable debug interrupt + * - else disable bebug inerrupts + */ +static void mgbe_debug_intr_config(struct osi_dma_priv_data *osi_dma) +{ + nveu32_t chinx; + nveu32_t chan; + nveu32_t val; + nveu32_t enable = osi_dma->ioctl_data.arg_u32; + + if (enable == OSI_ENABLE) { + for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) { + chan = osi_dma->dma_chans[chinx]; + val = osi_readl((nveu8_t *)osi_dma->base + + MGBE_DMA_CHX_INTR_ENA(chan)); + + val |= (MGBE_DMA_CHX_INTR_AIE | + MGBE_DMA_CHX_INTR_FBEE | + MGBE_DMA_CHX_INTR_RBUE | + MGBE_DMA_CHX_INTR_TBUE | + MGBE_DMA_CHX_INTR_NIE); + osi_writel(val, (nveu8_t *)osi_dma->base + + MGBE_DMA_CHX_INTR_ENA(chan)); + } + + } else { + for (chinx = 0; chinx < osi_dma->num_dma_chans; chinx++) { + chan = osi_dma->dma_chans[chinx]; + val = osi_readl((nveu8_t *)osi_dma->base + + MGBE_DMA_CHX_INTR_ENA(chan)); + val &= (~MGBE_DMA_CHX_INTR_AIE & + ~MGBE_DMA_CHX_INTR_FBEE & + ~MGBE_DMA_CHX_INTR_RBUE & + ~MGBE_DMA_CHX_INTR_TBUE & + ~MGBE_DMA_CHX_INTR_NIE); + osi_writel(val, (nveu8_t *)osi_dma->base + + MGBE_DMA_CHX_INTR_ENA(chan)); + } + } +} +#endif + void mgbe_init_dma_chan_ops(struct dma_chan_ops *ops) { - ops->set_tx_ring_len = mgbe_set_tx_ring_len; - ops->set_rx_ring_len = mgbe_set_rx_ring_len; - ops->set_tx_ring_start_addr = 
mgbe_set_tx_ring_start_addr; - ops->set_rx_ring_start_addr = mgbe_set_rx_ring_start_addr; - ops->update_tx_tailptr = mgbe_update_tx_tailptr; - ops->update_rx_tailptr = mgbe_update_rx_tailptr; - ops->disable_chan_tx_intr = mgbe_disable_chan_tx_intr; - ops->enable_chan_tx_intr = mgbe_enable_chan_tx_intr; - ops->disable_chan_rx_intr = mgbe_disable_chan_rx_intr; - ops->enable_chan_rx_intr = mgbe_enable_chan_rx_intr; - ops->start_dma = mgbe_start_dma; - ops->stop_dma = mgbe_stop_dma; - ops->init_dma_channel = mgbe_init_dma_channel; - ops->set_rx_buf_len = mgbe_set_rx_buf_len; - ops->validate_regs = mgbe_validate_dma_regs; - ops->clear_vm_tx_intr = mgbe_clear_vm_tx_intr; - ops->clear_vm_rx_intr = mgbe_clear_vm_rx_intr; ops->config_slot = mgbe_config_slot; +#ifdef OSI_DEBUG + ops->debug_intr_config = mgbe_debug_intr_config; +#endif }; +#endif /* !OSI_STRIPPED_LIB */ diff --git a/kernel/nvethernetrm/osi/dma/mgbe_dma.h b/kernel/nvethernetrm/osi/dma/mgbe_dma.h index 93215013bd..0990d6ecf3 100644 --- a/kernel/nvethernetrm/osi/dma/mgbe_dma.h +++ b/kernel/nvethernetrm/osi/dma/mgbe_dma.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -32,17 +32,6 @@ #define MGBE_AXI_CLK_FREQ 480000000U /** @} */ -/** - * @@addtogroup Timestamp Capture Register - * @brief MGBE MAC Timestamp Register offset - * @{ - */ -#define MGBE_MAC_TSS 0X0D20 -#define MGBE_MAC_TS_NSEC 0x0D30 -#define MGBE_MAC_TS_SEC 0x0D34 -#define MGBE_MAC_TS_PID 0x0D38 -/** @} */ - /** * @addtogroup MGBE_DMA DMA Channel Register offsets * @@ -51,7 +40,9 @@ */ #define MGBE_DMA_CHX_TX_CTRL(x) ((0x0080U * (x)) + 0x3104U) #define MGBE_DMA_CHX_RX_CTRL(x) ((0x0080U * (x)) + 0x3108U) +#ifndef OSI_STRIPPED_LIB #define MGBE_DMA_CHX_SLOT_CTRL(x) ((0x0080U * (x)) + 0x310CU) +#endif /* !OSI_STRIPPED_LIB */ #define MGBE_DMA_CHX_INTR_ENA(x) ((0x0080U * (x)) + 0x3138U) #define MGBE_DMA_CHX_CTRL(x) ((0x0080U * (x)) + 0x3100U) #define MGBE_DMA_CHX_RX_WDT(x) ((0x0080U * (x)) + 0x313CU) @@ -60,22 +51,11 @@ #define MGBE_DMA_CHX_TDLH(x) ((0x0080U * (x)) + 0x3110U) #define MGBE_DMA_CHX_TDLA(x) ((0x0080U * (x)) + 0x3114U) #define MGBE_DMA_CHX_TDTLP(x) ((0x0080U * (x)) + 0x3124U) -#define MGBE_DMA_CHX_TDTHP(x) ((0x0080U * (x)) + 0x3120U) #define MGBE_DMA_CHX_RDLH(x) ((0x0080U * (x)) + 0x3118U) #define MGBE_DMA_CHX_RDLA(x) ((0x0080U * (x)) + 0x311CU) -#define MGBE_DMA_CHX_RDTHP(x) ((0x0080U * (x)) + 0x3128U) #define MGBE_DMA_CHX_RDTLP(x) ((0x0080U * (x)) + 0x312CU) /** @} */ -/** - * @addtogroup MGBE_INTR INT Channel Register offsets - * - * @brief MGBE Virtural Interrupt Channel register offsets - * @{ - */ -#define MGBE_VIRT_INTR_CHX_STATUS(x) (0x8604U + ((x) * 8U)) -#define MGBE_VIRT_INTR_CHX_CNTRL(x) (0x8600U + ((x) * 8U)) -#define MGBE_VIRT_INTR_APB_CHX_CNTRL(x) (0x8200U + ((x) * 4U)) /** @} */ /** @@ -84,44 +64,25 @@ * @brief Values defined for the MGBE registers * @{ */ -#define MGBE_DMA_CHX_TX_CTRL_OSP OSI_BIT(4) -#define MGBE_DMA_CHX_TX_CTRL_TSE OSI_BIT(12) #define MGBE_DMA_CHX_RX_WDT_RWT_MASK 0xFFU 
#define MGBE_DMA_CHX_RX_WDT_RWTU 2048U -#define MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE 3U -#define MGBE_DMA_CHX_RX_WDT_RWTU_MASK 3U -#define MGBE_DMA_CHX_RX_WDT_RWTU_SHIFT 12U -#define MGBE_DMA_CHX_RBSZ_MASK 0x7FFEU -#define MGBE_DMA_CHX_RBSZ_SHIFT 1U -#define MGBE_AXI_BUS_WIDTH 0x10U -#define MGBE_DMA_CHX_CTRL_PBLX8 OSI_BIT(16) -#define MGBE_DMA_CHX_INTR_TIE OSI_BIT(0) +#define MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE 0x3000U +#define MGBE_DMA_CHX_RX_WDT_RWTU_MASK 0x3000U +#ifdef OSI_DEBUG #define MGBE_DMA_CHX_INTR_TBUE OSI_BIT(2) -#define MGBE_DMA_CHX_INTR_RIE OSI_BIT(6) #define MGBE_DMA_CHX_INTR_RBUE OSI_BIT(7) #define MGBE_DMA_CHX_INTR_FBEE OSI_BIT(12) #define MGBE_DMA_CHX_INTR_AIE OSI_BIT(14) #define MGBE_DMA_CHX_INTR_NIE OSI_BIT(15) -#define MGBE_DMA_CHX_STATUS_TI OSI_BIT(0) -#define MGBE_DMA_CHX_STATUS_RI OSI_BIT(6) -#define MGBE_DMA_CHX_STATUS_NIS OSI_BIT(15) +#endif +#ifndef OSI_STRIPPED_LIB #define MGBE_DMA_CHX_SLOT_ESC OSI_BIT(0) -#define MGBE_DMA_CHX_STATUS_CLEAR_TX (MGBE_DMA_CHX_STATUS_TI | \ - MGBE_DMA_CHX_STATUS_NIS) -#define MGBE_DMA_CHX_STATUS_CLEAR_RX (MGBE_DMA_CHX_STATUS_RI | \ - MGBE_DMA_CHX_STATUS_NIS) -#define MGBE_VIRT_INTR_CHX_STATUS_TX OSI_BIT(0) -#define MGBE_VIRT_INTR_CHX_STATUS_RX OSI_BIT(1) -#define MGBE_VIRT_INTR_CHX_CNTRL_TX OSI_BIT(0) -#define MGBE_VIRT_INTR_CHX_CNTRL_RX OSI_BIT(1) +#endif /* !OSI_STRIPPED_LIB */ #define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED 64U #define MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT 24U #define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN 32U #define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN 64U #define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT 24U -#define MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN_PRESI 8U -#define MGBE_DMA_CHX_RX_CNTRL2_ORRQ_SCHAN_PRESI 16U -#define MGBE_DMA_RING_LENGTH_MASK 0xFFFFU #define MGBE_DMA_CHX_CTRL_PBL_SHIFT 16U /** @} */ @@ -131,35 +92,14 @@ * @brief Values defined for PBL settings * @{ */ -/* Tx and Rx Qsize is 64KB */ -#define MGBE_TXQ_RXQ_SIZE_FPGA 65536U /* Tx Queue size is 128KB */ #define MGBE_TXQ_SIZE 131072U /* Rx 
Queue size is 192KB */ #define MGBE_RXQ_SIZE 196608U /* MAX PBL value */ #define MGBE_DMA_CHX_MAX_PBL 256U +#define MGBE_DMA_CHX_MAX_PBL_VAL 0x200000U /* AXI Data width */ #define MGBE_AXI_DATAWIDTH 128U /** @} */ - -/** - * @addtogroup MGBE MAC timestamp registers bit field. - * - * @brief Values defined for the MGBE timestamp registers - * @{ - */ -#define MGBE_MAC_TSS_TXTSC OSI_BIT(15) -#define MGBE_MAC_TS_PID_MASK 0x3FFU -#define MGBE_MAC_TS_NSEC_MASK 0x7FFFFFFFU -/** @} */ - -/** - * @brief mgbe_get_dma_chan_ops - MGBE get DMA channel operations - * - * Algorithm: Returns pointer DMA channel operations structure. - * - * @returns Pointer to DMA channel operations structure - */ -struct osi_dma_chan_ops *mgbe_get_dma_chan_ops(void); #endif diff --git a/kernel/nvethernetrm/osi/dma/osi_dma.c b/kernel/nvethernetrm/osi/dma/osi_dma.c index 6d7e16f3f5..be197083a3 100644 --- a/kernel/nvethernetrm/osi/dma/osi_dma.c +++ b/kernel/nvethernetrm/osi/dma/osi_dma.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -32,15 +32,92 @@ /** * @brief g_dma - DMA local data array. */ -static struct dma_local g_dma[MAX_DMA_INSTANCES]; /** * @brief g_ops - local DMA HW operations array. 
*/ -static struct dma_chan_ops g_ops[MAX_MAC_IP_TYPES]; + +typedef nve32_t (*dma_intr_fn)(struct osi_dma_priv_data const *osi_dma, + nveu32_t intr_ctrl, nveu32_t intr_status, + nveu32_t dma_status, nveu32_t val); +static inline nve32_t enable_intr(struct osi_dma_priv_data const *osi_dma, + nveu32_t intr_ctrl, nveu32_t intr_status, + nveu32_t dma_status, nveu32_t val); +static inline nve32_t disable_intr(struct osi_dma_priv_data const *osi_dma, + nveu32_t intr_ctrl, nveu32_t intr_status, + nveu32_t dma_status, nveu32_t val); +static dma_intr_fn intr_fn[2] = { disable_intr, enable_intr }; + +static inline nveu32_t set_pos_val(nveu32_t val, nveu32_t pos_val) +{ + return (val | pos_val); +} + +static inline nveu32_t clear_pos_val(nveu32_t val, nveu32_t pos_val) +{ + return (val & ~pos_val); +} + +static inline nve32_t intr_en_dis_retry(nveu8_t *base, nveu32_t intr_ctrl, + nveu32_t val, nveu32_t en_dis) +{ + typedef nveu32_t (*set_clear)(nveu32_t val, nveu32_t pos); + const set_clear set_clr[2] = { clear_pos_val, set_pos_val }; + nveu32_t cntrl1, cntrl2, i; + nve32_t ret = -1; + + for (i = 0U; i < 10U; i++) { + cntrl1 = osi_readl(base + intr_ctrl); + cntrl1 = set_clr[en_dis](cntrl1, val); + osi_writel(cntrl1, base + intr_ctrl); + + cntrl2 = osi_readl(base + intr_ctrl); + if (cntrl1 == cntrl2) { + ret = 0; + break; + } else { + continue; + } + } + + return ret; +} + +static inline nve32_t enable_intr(struct osi_dma_priv_data const *osi_dma, + nveu32_t intr_ctrl, OSI_UNUSED nveu32_t intr_status, + OSI_UNUSED nveu32_t dma_status, nveu32_t val) +{ + return intr_en_dis_retry((nveu8_t *)osi_dma->base, intr_ctrl, + val, OSI_DMA_INTR_ENABLE); +} + +static inline nve32_t disable_intr(struct osi_dma_priv_data const *osi_dma, + nveu32_t intr_ctrl, nveu32_t intr_status, + nveu32_t dma_status, nveu32_t val) +{ + nveu8_t *base = (nveu8_t *)osi_dma->base; + const nveu32_t status_val[4] = { + 0, + EQOS_DMA_CHX_STATUS_CLEAR_TX, + EQOS_DMA_CHX_STATUS_CLEAR_RX, + 0, + }; + nveu32_t 
status; + + status = osi_readl(base + intr_status); + if ((status & val) == val) { + osi_writel(status_val[val], base + dma_status); + osi_writel(val, base + intr_status); + } + + return intr_en_dis_retry((nveu8_t *)osi_dma->base, intr_ctrl, + val, OSI_DMA_INTR_DISABLE); +} struct osi_dma_priv_data *osi_get_dma(void) { + static struct dma_local g_dma[MAX_DMA_INSTANCES]; + struct osi_dma_priv_data *osi_dma = OSI_NULL; nveu32_t i; for (i = 0U; i < MAX_DMA_INSTANCES; i++) { @@ -52,12 +129,14 @@ struct osi_dma_priv_data *osi_get_dma(void) } if (i == MAX_DMA_INSTANCES) { - return OSI_NULL; + goto fail; } g_dma[i].magic_num = (nveu64_t)&g_dma[i].osi_dma; - return &g_dma[i].osi_dma; + osi_dma = &g_dma[i].osi_dma; +fail: + return osi_dma; } /** @@ -75,15 +154,17 @@ struct osi_dma_priv_data *osi_get_dma(void) * @retval 0 on Success * @retval -1 on Failure */ -static inline nve32_t validate_args(struct osi_dma_priv_data *osi_dma, - struct dma_local *l_dma) +static inline nve32_t dma_validate_args(const struct osi_dma_priv_data *const osi_dma, + const struct dma_local *const l_dma) { + nve32_t ret = 0; + if ((osi_dma == OSI_NULL) || (osi_dma->base == OSI_NULL) || (l_dma->init_done == OSI_DISABLE)) { - return -1; + ret = -1; } - return 0; + return ret; } /** @@ -104,15 +185,16 @@ static inline nve32_t validate_args(struct osi_dma_priv_data *osi_dma, static inline nve32_t validate_dma_chan_num(struct osi_dma_priv_data *osi_dma, nveu32_t chan) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; - if (chan >= l_dma->max_chans) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + if (chan >= l_dma->num_max_chans) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid DMA channel number\n", chan); - return -1; + ret = -1; } - return 0; + return ret; } /** @@ -131,21 +213,23 @@ static inline nve32_t validate_dma_chan_num(struct osi_dma_priv_data *osi_dma, */ static inline nve32_t 
validate_dma_chans(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; - nveu32_t i = 0; + const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma; + nveu32_t i = 0U; + nve32_t ret = 0; for (i = 0; i < osi_dma->num_dma_chans; i++) { - if (osi_dma->dma_chans[i] > l_dma->max_chans) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + if (osi_dma->dma_chans[i] > l_dma->num_max_chans) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid DMA channel number:\n", osi_dma->dma_chans[i]); - return -1; + ret = -1; } } - return 0; + return ret; } +#ifndef OSI_STRIPPED_LIB /** * @brief Function to validate function pointers. * @@ -171,14 +255,15 @@ static nve32_t validate_func_ptrs(struct osi_dma_priv_data *osi_dma, #elif __SIZEOF_POINTER__ == 4 nveu32_t *l_ops = (nveu32_t *)temp_ops; #else - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA: Undefined architecture\n", 0ULL); return -1; #endif + (void) osi_dma; for (i = 0; i < (sizeof(*ops_p) / (nveu64_t)__SIZEOF_POINTER__); i++) { if (*l_ops == 0U) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma: fn ptr validation failed at\n", (nveu64_t)i); return -1; @@ -189,30 +274,31 @@ static nve32_t validate_func_ptrs(struct osi_dma_priv_data *osi_dma, return 0; } +#endif nve32_t osi_init_dma_ops(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; - nveu32_t default_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_DEFAULT_RING_SZ }; - nveu32_t max_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_MAX_RING_SZ }; + const nveu32_t default_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_DEFAULT_RING_SZ }; + const nveu32_t max_rz[] = { EQOS_DEFAULT_RING_SZ, MGBE_MAX_RING_SZ }; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + static struct dma_chan_ops dma_gops[MAX_MAC_IP_TYPES]; +#ifndef OSI_STRIPPED_LIB typedef void (*init_ops_arr)(struct dma_chan_ops *temp); 
- typedef void *(*safety_init)(void); - - init_ops_arr i_ops[MAX_MAC_IP_TYPES] = { + const init_ops_arr i_ops[MAX_MAC_IP_TYPES] = { eqos_init_dma_chan_ops, mgbe_init_dma_chan_ops }; - - safety_init s_init[MAX_MAC_IP_TYPES] = { - eqos_get_dma_safety_config, OSI_NULL - }; +#endif + nve32_t ret = 0; if (osi_dma == OSI_NULL) { - return -1; + ret = -1; + goto fail; } if ((l_dma->magic_num != (nveu64_t)osi_dma) || (l_dma->init_done == OSI_ENABLE)) { - return -1; + ret = -1; + goto fail; } if (osi_dma->is_ethernet_server != OSI_ENABLE) { @@ -223,115 +309,295 @@ nve32_t osi_init_dma_ops(struct osi_dma_priv_data *osi_dma) (osi_dma->osd_ops.printf == OSI_NULL) || #endif /* OSI_DEBUG */ (osi_dma->osd_ops.udelay == OSI_NULL)) { - return -1; + ret = -1; + goto fail; } } if (osi_dma->mac > OSI_MAC_HW_MGBE) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA: Invalid MAC HW type\n", 0ULL); - return -1; + ret = -1; + goto fail; } if ((osi_dma->tx_ring_sz == 0U) || - !(is_power_of_two(osi_dma->tx_ring_sz)) || + (is_power_of_two(osi_dma->tx_ring_sz) == 0U) || (osi_dma->tx_ring_sz < HW_MIN_RING_SZ) || (osi_dma->tx_ring_sz > default_rz[osi_dma->mac])) { - osi_dma->tx_ring_sz = default_rz[osi_dma->mac]; - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "DMA: Using default Tx ring size: \n", + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "DMA: Invalid Tx ring size:\n", osi_dma->tx_ring_sz); + ret = -1; + goto fail; } if ((osi_dma->rx_ring_sz == 0U) || - !(is_power_of_two(osi_dma->rx_ring_sz)) || + (is_power_of_two(osi_dma->rx_ring_sz) == 0U) || (osi_dma->rx_ring_sz < HW_MIN_RING_SZ) || (osi_dma->rx_ring_sz > max_rz[osi_dma->mac])) { - osi_dma->rx_ring_sz = default_rz[osi_dma->mac]; - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "DMA: Using default rx ring size: \n", + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "DMA: Invalid Rx ring size:\n", osi_dma->tx_ring_sz); + ret = -1; + goto fail; } - - 
i_ops[osi_dma->mac](&g_ops[osi_dma->mac]); - - if (s_init[osi_dma->mac] != OSI_NULL) { - osi_dma->safety_config = s_init[osi_dma->mac](); - } - +#ifndef OSI_STRIPPED_LIB + i_ops[osi_dma->mac](&dma_gops[osi_dma->mac]); +#endif if (init_desc_ops(osi_dma) < 0) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA desc ops init failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } - if (validate_func_ptrs(osi_dma, &g_ops[osi_dma->mac]) < 0) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, +#ifndef OSI_STRIPPED_LIB + if (validate_func_ptrs(osi_dma, &dma_gops[osi_dma->mac]) < 0) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA ops validation failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } +#endif - l_dma->ops_p = &g_ops[osi_dma->mac]; + l_dma->ops_p = &dma_gops[osi_dma->mac]; l_dma->init_done = OSI_ENABLE; - return 0; +fail: + return ret; +} + +static inline void start_dma(const struct osi_dma_priv_data *const osi_dma, nveu32_t dma_chan) +{ + nveu32_t chan = dma_chan & 0xFU; + const nveu32_t tx_dma_reg[2] = { + EQOS_DMA_CHX_TX_CTRL(chan), + MGBE_DMA_CHX_TX_CTRL(chan) + }; + const nveu32_t rx_dma_reg[2] = { + EQOS_DMA_CHX_RX_CTRL(chan), + MGBE_DMA_CHX_RX_CTRL(chan) + }; + nveu32_t val; + + /* Start Tx DMA */ + val = osi_readl((nveu8_t *)osi_dma->base + tx_dma_reg[osi_dma->mac]); + val |= OSI_BIT(0); + osi_writel(val, (nveu8_t *)osi_dma->base + tx_dma_reg[osi_dma->mac]); + + /* Start Rx DMA */ + val = osi_readl((nveu8_t *)osi_dma->base + rx_dma_reg[osi_dma->mac]); + val |= OSI_BIT(0); + val &= ~OSI_BIT(31); + osi_writel(val, (nveu8_t *)osi_dma->base + rx_dma_reg[osi_dma->mac]); +} + +static void init_dma_channel(const struct osi_dma_priv_data *const osi_dma, + nveu32_t dma_chan) +{ + nveu32_t chan = dma_chan & 0xFU; + nveu32_t riwt = osi_dma->rx_riwt & 0xFFFU; + const nveu32_t intr_en_reg[2] = { + EQOS_DMA_CHX_INTR_ENA(chan), + MGBE_DMA_CHX_INTR_ENA(chan) + }; + const nveu32_t chx_ctrl_reg[2] = { + 
EQOS_DMA_CHX_CTRL(chan), + MGBE_DMA_CHX_CTRL(chan) + }; + const nveu32_t tx_ctrl_reg[2] = { + EQOS_DMA_CHX_TX_CTRL(chan), + MGBE_DMA_CHX_TX_CTRL(chan) + }; + const nveu32_t rx_ctrl_reg[2] = { + EQOS_DMA_CHX_RX_CTRL(chan), + MGBE_DMA_CHX_RX_CTRL(chan) + }; + const nveu32_t rx_wdt_reg[2] = { + EQOS_DMA_CHX_RX_WDT(chan), + MGBE_DMA_CHX_RX_WDT(chan) + }; + const nveu32_t tx_pbl[2] = { + EQOS_DMA_CHX_TX_CTRL_TXPBL_RECOMMENDED, + ((((MGBE_TXQ_SIZE / osi_dma->num_dma_chans) - + osi_dma->mtu) / (MGBE_AXI_DATAWIDTH / 8U)) - 5U) + }; + const nveu32_t rx_pbl[2] = { + EQOS_DMA_CHX_RX_CTRL_RXPBL_RECOMMENDED, + ((MGBE_RXQ_SIZE / osi_dma->num_dma_chans) / 2U) + }; + const nveu32_t rwt_val[2] = { + (((riwt * (EQOS_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) / + EQOS_DMA_CHX_RX_WDT_RWTU) & EQOS_DMA_CHX_RX_WDT_RWT_MASK), + (((riwt * ((nveu32_t)MGBE_AXI_CLK_FREQ / OSI_ONE_MEGA_HZ)) / + MGBE_DMA_CHX_RX_WDT_RWTU) & MGBE_DMA_CHX_RX_WDT_RWT_MASK) + }; + const nveu32_t rwtu_val[2] = { + EQOS_DMA_CHX_RX_WDT_RWTU_512_CYCLE, + MGBE_DMA_CHX_RX_WDT_RWTU_2048_CYCLE + }; + const nveu32_t rwtu_mask[2] = { + EQOS_DMA_CHX_RX_WDT_RWTU_MASK, + MGBE_DMA_CHX_RX_WDT_RWTU_MASK + }; + const nveu32_t owrq = (MGBE_DMA_CHX_RX_CNTRL2_OWRQ_MCHAN / osi_dma->num_dma_chans); + const nveu32_t owrq_arr[OSI_MGBE_MAX_NUM_CHANS] = { + MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SCHAN, owrq, owrq, owrq, + owrq, owrq, owrq, owrq, owrq, owrq + }; + nveu32_t val; + + /* Enable Transmit/Receive interrupts */ + val = osi_readl((nveu8_t *)osi_dma->base + intr_en_reg[osi_dma->mac]); + val |= (DMA_CHX_INTR_TIE | DMA_CHX_INTR_RIE); + osi_writel(val, (nveu8_t *)osi_dma->base + intr_en_reg[osi_dma->mac]); + + /* Enable PBLx8 */ + val = osi_readl((nveu8_t *)osi_dma->base + chx_ctrl_reg[osi_dma->mac]); + val |= DMA_CHX_CTRL_PBLX8; + osi_writel(val, (nveu8_t *)osi_dma->base + chx_ctrl_reg[osi_dma->mac]); + + /* Program OSP, TSO enable and TXPBL */ + val = osi_readl((nveu8_t *)osi_dma->base + tx_ctrl_reg[osi_dma->mac]); + val |= (DMA_CHX_TX_CTRL_OSP | 
DMA_CHX_TX_CTRL_TSE); + + if (osi_dma->mac == OSI_MAC_HW_EQOS) { + val |= tx_pbl[osi_dma->mac]; + } else { + /* + * Formula for TxPBL calculation is + * (TxPBL) < ((TXQSize - MTU)/(DATAWIDTH/8)) - 5 + * if TxPBL exceeds the value of 256 then we need to make use of 256 + * as the TxPBL else we should be using the value whcih we get after + * calculation by using above formula + */ + if (tx_pbl[osi_dma->mac] >= MGBE_DMA_CHX_MAX_PBL) { + val |= MGBE_DMA_CHX_MAX_PBL_VAL; + } else { + val |= ((tx_pbl[osi_dma->mac] / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT); + } + } + osi_writel(val, (nveu8_t *)osi_dma->base + tx_ctrl_reg[osi_dma->mac]); + + val = osi_readl((nveu8_t *)osi_dma->base + rx_ctrl_reg[osi_dma->mac]); + val &= ~DMA_CHX_RBSZ_MASK; + /** Subtract 30 bytes again which were added for buffer address alignment + * HW don't need those extra 30 bytes. If data length received more than + * below programed value then it will result in two descriptors which + * eventually drop by OSI. Subtracting 30 bytes so that HW don't receive + * unwanted length data. 
+ **/ + val |= ((osi_dma->rx_buf_len - 30U) << DMA_CHX_RBSZ_SHIFT); + if (osi_dma->mac == OSI_MAC_HW_EQOS) { + val |= rx_pbl[osi_dma->mac]; + } else { + if (rx_pbl[osi_dma->mac] >= MGBE_DMA_CHX_MAX_PBL) { + val |= MGBE_DMA_CHX_MAX_PBL_VAL; + } else { + val |= ((rx_pbl[osi_dma->mac] / 8U) << MGBE_DMA_CHX_CTRL_PBL_SHIFT); + } + } + osi_writel(val, (nveu8_t *)osi_dma->base + rx_ctrl_reg[osi_dma->mac]); + + if ((osi_dma->use_riwt == OSI_ENABLE) && + (osi_dma->rx_riwt < UINT_MAX)) { + val = osi_readl((nveu8_t *)osi_dma->base + rx_wdt_reg[osi_dma->mac]); + val &= ~DMA_CHX_RX_WDT_RWT_MASK; + val |= rwt_val[osi_dma->mac]; + osi_writel(val, (nveu8_t *)osi_dma->base + rx_wdt_reg[osi_dma->mac]); + + val = osi_readl((nveu8_t *)osi_dma->base + rx_wdt_reg[osi_dma->mac]); + val &= ~rwtu_mask[osi_dma->mac]; + val |= rwtu_val[osi_dma->mac]; + osi_writel(val, (nveu8_t *)osi_dma->base + rx_wdt_reg[osi_dma->mac]); + } + + if (osi_dma->mac == OSI_MAC_HW_MGBE) { + /* Update ORRQ in DMA_CH(#i)_Tx_Control2 register */ + val = osi_readl((nveu8_t *)osi_dma->base + MGBE_DMA_CHX_TX_CNTRL2(chan)); + val |= (((MGBE_DMA_CHX_TX_CNTRL2_ORRQ_RECOMMENDED / osi_dma->num_dma_chans)) << + MGBE_DMA_CHX_TX_CNTRL2_ORRQ_SHIFT); + osi_writel(val, (nveu8_t *)osi_dma->base + MGBE_DMA_CHX_TX_CNTRL2(chan)); + + /* Update OWRQ in DMA_CH(#i)_Rx_Control2 register */ + val = osi_readl((nveu8_t *)osi_dma->base + MGBE_DMA_CHX_RX_CNTRL2(chan)); + val |= (owrq_arr[osi_dma->num_dma_chans - 1U] << MGBE_DMA_CHX_RX_CNTRL2_OWRQ_SHIFT); + osi_writel(val, (nveu8_t *)osi_dma->base + MGBE_DMA_CHX_RX_CNTRL2(chan)); + } } nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; nveu32_t i, chan; - nve32_t ret; + nve32_t ret = 0; - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } l_dma->mac_ver = osi_readl((nveu8_t 
*)osi_dma->base + MAC_VERSION) & MAC_VERSION_SNVER_MASK; if (validate_mac_ver_update_chans(l_dma->mac_ver, - &l_dma->max_chans) == 0) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + &l_dma->num_max_chans, + &l_dma->l_mac_ver) == 0) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid MAC version\n", (nveu64_t)l_dma->mac_ver); - return -1; + ret = -1; + goto fail; } - if (osi_dma->num_dma_chans > l_dma->max_chans) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + if ((osi_dma->num_dma_chans == 0U) || + (osi_dma->num_dma_chans > l_dma->num_max_chans)) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid number of DMA channels\n", 0ULL); - return -1; + ret = -1; + goto fail; } if (validate_dma_chans(osi_dma) < 0) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA channels validation failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } - ret = l_dma->ops_p->init_dma_channel(osi_dma); - if (ret < 0) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, - "dma: init dma channel failed\n", 0ULL); - return ret; - } - - ret = dma_desc_init(osi_dma, l_dma->ops_p); + ret = dma_desc_init(osi_dma); if (ret != 0) { - return ret; - } - - if ((l_dma->mac_ver != OSI_EQOS_MAC_4_10) && - (l_dma->mac_ver != OSI_EQOS_MAC_5_00)) { - l_dma->vm_intr = OSI_ENABLE; + goto fail; } /* Enable channel interrupts at wrapper level and start DMA */ for (i = 0; i < osi_dma->num_dma_chans; i++) { chan = osi_dma->dma_chans[i]; - l_dma->ops_p->enable_chan_tx_intr(osi_dma->base, chan); - l_dma->ops_p->enable_chan_rx_intr(osi_dma->base, chan); - l_dma->ops_p->start_dma(osi_dma, chan); + init_dma_channel(osi_dma, chan); + + ret = intr_fn[OSI_DMA_INTR_ENABLE](osi_dma, + VIRT_INTR_CHX_CNTRL(chan), + VIRT_INTR_CHX_STATUS(chan), + ((osi_dma->mac == OSI_MAC_HW_MGBE) ? 
+ MGBE_DMA_CHX_STATUS(chan) : + EQOS_DMA_CHX_STATUS(chan)), + OSI_BIT(OSI_DMA_CH_TX_INTR)); + if (ret < 0) { + goto fail; + } + + ret = intr_fn[OSI_DMA_INTR_ENABLE](osi_dma, + VIRT_INTR_CHX_CNTRL(chan), + VIRT_INTR_CHX_STATUS(chan), + ((osi_dma->mac == OSI_MAC_HW_MGBE) ? + MGBE_DMA_CHX_STATUS(chan) : + EQOS_DMA_CHX_STATUS(chan)), + OSI_BIT(OSI_DMA_CH_RX_INTR)); + if (ret < 0) { + goto fail; + } + + start_dma(osi_dma, chan); } /** @@ -342,158 +608,81 @@ nve32_t osi_hw_dma_init(struct osi_dma_priv_data *osi_dma) osi_dma->ptp_flag = (OSI_PTP_SYNC_SLAVE | OSI_PTP_SYNC_TWOSTEP); } - return 0; +fail: + return ret; +} + +static inline void stop_dma(const struct osi_dma_priv_data *const osi_dma, + nveu32_t dma_chan) +{ + nveu32_t chan = dma_chan & 0xFU; + const nveu32_t dma_tx_reg[2] = { + EQOS_DMA_CHX_TX_CTRL(chan), + MGBE_DMA_CHX_TX_CTRL(chan) + }; + const nveu32_t dma_rx_reg[2] = { + EQOS_DMA_CHX_RX_CTRL(chan), + MGBE_DMA_CHX_RX_CTRL(chan) + }; + nveu32_t val; + + /* Stop Tx DMA */ + val = osi_readl((nveu8_t *)osi_dma->base + dma_tx_reg[osi_dma->mac]); + val &= ~OSI_BIT(0); + osi_writel(val, (nveu8_t *)osi_dma->base + dma_tx_reg[osi_dma->mac]); + + /* Stop Rx DMA */ + val = osi_readl((nveu8_t *)osi_dma->base + dma_rx_reg[osi_dma->mac]); + val &= ~OSI_BIT(0); + val |= OSI_BIT(31); + osi_writel(val, (nveu8_t *)osi_dma->base + dma_rx_reg[osi_dma->mac]); } nve32_t osi_hw_dma_deinit(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; nveu32_t i; - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } - if (osi_dma->num_dma_chans > l_dma->max_chans) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + if (osi_dma->num_dma_chans > l_dma->num_max_chans) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "Invalid number of DMA channels\n", 0ULL); - return -1; + ret = -1; + goto fail; } 
if (validate_dma_chans(osi_dma) < 0) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA channels validation failed\n", 0ULL); - return -1; + ret = -1; + goto fail; } for (i = 0; i < osi_dma->num_dma_chans; i++) { - l_dma->ops_p->stop_dma(osi_dma, osi_dma->dma_chans[i]); - } - - /* FIXME: Need to fix */ -// l_dma->magic_num = 0; -// l_dma->init_done = OSI_DISABLE; - - return 0; -} - -nve32_t osi_disable_chan_tx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->disable_chan_tx_intr(osi_dma->base, chan); - - return 0; -} - -nve32_t osi_enable_chan_tx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->enable_chan_tx_intr(osi_dma->base, chan); - - return 0; -} - -nve32_t osi_disable_chan_rx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->disable_chan_rx_intr(osi_dma->base, chan); - - return 0; -} - -nve32_t osi_enable_chan_rx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->enable_chan_rx_intr(osi_dma->base, chan); - - return 0; -} - -nve32_t osi_clear_vm_tx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local 
*)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->clear_vm_tx_intr(osi_dma->base, chan); - - return 0; -} - -nve32_t osi_clear_vm_rx_intr(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + stop_dma(osi_dma, osi_dma->dma_chans[i]); } - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->clear_vm_rx_intr(osi_dma->base, chan); - - return 0; +fail: + return ret; } nveu32_t osi_get_global_dma_status(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nveu32_t ret = 0U; - if (validate_args(osi_dma, l_dma) < 0) { - return 0; + if (dma_validate_args(osi_dma, l_dma) < 0) { + goto fail; } - return osi_readl((nveu8_t *)osi_dma->base + HW_GLOBAL_DMA_STATUS); + ret = osi_readl((nveu8_t *)osi_dma->base + HW_GLOBAL_DMA_STATUS); +fail: + return ret; } nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma, @@ -501,86 +690,54 @@ nve32_t osi_handle_dma_intr(struct osi_dma_priv_data *osi_dma, nveu32_t tx_rx, nveu32_t en_dis) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; - typedef void (*dma_intr_fn)(void *base, nveu32_t ch); - dma_intr_fn fn[2][2][2] = { - { { l_dma->ops_p->disable_chan_tx_intr, l_dma->ops_p->enable_chan_tx_intr }, - { l_dma->ops_p->disable_chan_rx_intr, l_dma->ops_p->enable_chan_rx_intr } }, - { { l_dma->ops_p->clear_vm_tx_intr, l_dma->ops_p->enable_chan_tx_intr }, - { l_dma->ops_p->clear_vm_rx_intr, l_dma->ops_p->enable_chan_rx_intr } } - }; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } if 
(validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; + ret = -1; + goto fail; } if ((tx_rx > OSI_DMA_CH_RX_INTR) || (en_dis > OSI_DMA_INTR_ENABLE)) { - return -1; - } - - fn[l_dma->vm_intr][tx_rx][en_dis](osi_dma->base, chan); - - return 0; -} - -nve32_t osi_start_dma(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->start_dma(osi_dma, chan); - - return 0; -} - -nve32_t osi_stop_dma(struct osi_dma_priv_data *osi_dma, - nveu32_t chan) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + ret = -1; + goto fail; } - if (validate_dma_chan_num(osi_dma, chan) < 0) { - return -1; - } - - l_dma->ops_p->stop_dma(osi_dma, chan); + ret = intr_fn[en_dis](osi_dma, VIRT_INTR_CHX_CNTRL(chan), + VIRT_INTR_CHX_STATUS(chan), ((osi_dma->mac == OSI_MAC_HW_MGBE) ? 
+ MGBE_DMA_CHX_STATUS(chan) : EQOS_DMA_CHX_STATUS(chan)), + OSI_BIT(tx_rx)); - return 0; +fail: + return ret; } -nveu32_t osi_get_refill_rx_desc_cnt(struct osi_dma_priv_data *osi_dma, - unsigned int chan) +nveu32_t osi_get_refill_rx_desc_cnt(const struct osi_dma_priv_data *const osi_dma, + nveu32_t chan) { - struct osi_rx_ring *rx_ring = osi_dma->rx_ring[chan]; + const struct osi_rx_ring *const rx_ring = osi_dma->rx_ring[chan]; + nveu32_t ret = 0U; if ((rx_ring == OSI_NULL) || (rx_ring->cur_rx_idx >= osi_dma->rx_ring_sz) || (rx_ring->refill_idx >= osi_dma->rx_ring_sz)) { - return 0; + goto fail; } - return (rx_ring->cur_rx_idx - rx_ring->refill_idx) & + ret = (rx_ring->cur_rx_idx - rx_ring->refill_idx) & (osi_dma->rx_ring_sz - 1U); +fail: + return ret; } /** - * @brief rx_dma_desc_validate_args - DMA Rx descriptor init args Validate + * @brief rx_dma_desc_dma_validate_args - DMA Rx descriptor init args Validate * * Algorithm: Validates DMA Rx descriptor init argments. * @@ -597,30 +754,36 @@ nveu32_t osi_get_refill_rx_desc_cnt(struct osi_dma_priv_data *osi_dma, * @retval 0 on success * @retval -1 on failure. 
*/ -static inline nve32_t rx_dma_desc_validate_args( +static inline nve32_t rx_dma_desc_dma_validate_args( struct osi_dma_priv_data *osi_dma, struct dma_local *l_dma, - struct osi_rx_ring *rx_ring, + const struct osi_rx_ring *const rx_ring, nveu32_t chan) { - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + nve32_t ret = 0; + + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } if (!((rx_ring != OSI_NULL) && (rx_ring->rx_swcx != OSI_NULL) && (rx_ring->rx_desc != OSI_NULL))) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } if (validate_dma_chan_num(osi_dma, chan) < 0) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma: Invalid channel\n", 0ULL); - return -1; + ret = -1; + goto fail; } - return 0; +fail: + return ret; } /** @@ -641,8 +804,8 @@ static inline nve32_t rx_dma_desc_validate_args( * - De-initialization: No * */ -static inline void rx_dma_handle_ioc(struct osi_dma_priv_data *osi_dma, - struct osi_rx_ring *rx_ring, +static inline void rx_dma_handle_ioc(const struct osi_dma_priv_data *const osi_dma, + const struct osi_rx_ring *const rx_ring, struct osi_rx_desc *rx_desc) { /* reset IOC bit if RWIT is enabled */ @@ -663,14 +826,16 @@ static inline void rx_dma_handle_ioc(struct osi_dma_priv_data *osi_dma, nve32_t osi_rx_dma_desc_init(struct osi_dma_priv_data *osi_dma, struct osi_rx_ring *rx_ring, nveu32_t chan) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; - nveu64_t tailptr = 0; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; struct osi_rx_swcx *rx_swcx = OSI_NULL; struct osi_rx_desc *rx_desc = OSI_NULL; + nveu64_t tailptr = 0; + nve32_t ret = 0; - if (rx_dma_desc_validate_args(osi_dma, l_dma, rx_ring, chan) < 0) { + if (rx_dma_desc_dma_validate_args(osi_dma, l_dma, rx_ring, chan) < 0) { /* Return on arguments validation failure */ - 
return -1; + ret = -1; + goto fail; } /* Refill buffers */ @@ -714,103 +879,139 @@ nve32_t osi_rx_dma_desc_init(struct osi_dma_priv_data *osi_dma, if (osi_unlikely(tailptr < rx_ring->rx_desc_phy_addr)) { /* Will not hit this case, used for CERT-C compliance */ - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma: Invalid tailptr\n", 0ULL); - return -1; + ret = -1; + goto fail; } - l_dma->ops_p->update_rx_tailptr(osi_dma->base, chan, tailptr); + update_rx_tail_ptr(osi_dma, chan, tailptr); - return 0; +fail: + return ret; } nve32_t osi_set_rx_buf_len(struct osi_dma_priv_data *osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nveu32_t rx_buf_len; + nve32_t ret = 0; - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; + goto fail; } - l_dma->ops_p->set_rx_buf_len(osi_dma); + if (osi_dma->mtu > OSI_MAX_MTU_SIZE) { + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, + "Invalid MTU setting\n", 0ULL); + ret = -1; + goto fail; + } - return 0; + /* Add Ethernet header + FCS */ + rx_buf_len = osi_dma->mtu + OSI_ETH_HLEN + NV_VLAN_HLEN; + + /* Add 30 bytes (15bytes extra at head portion for alignment and 15bytes + * extra to cover tail portion) again for the buffer address alignment + */ + rx_buf_len += 30U; + + /* Buffer alignment */ + osi_dma->rx_buf_len = ((rx_buf_len + (AXI_BUS_WIDTH - 1U)) & + ~(AXI_BUS_WIDTH - 1U)); + +fail: + return ret; } nve32_t osi_dma_get_systime_from_mac(struct osi_dma_priv_data *const osi_dma, nveu32_t *sec, nveu32_t *nsec) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; - if (validate_args(osi_dma, l_dma) < 0) { - return -1; + if (dma_validate_args(osi_dma, l_dma) < 0) { + ret = -1; } common_get_systime_from_mac(osi_dma->base, osi_dma->mac, sec, nsec); - return 0; + 
return ret; } nveu32_t osi_is_mac_enabled(struct osi_dma_priv_data *const osi_dma) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nveu32_t ret = OSI_DISABLE; - if (validate_args(osi_dma, l_dma) < 0) { - return OSI_DISABLE; + if (dma_validate_args(osi_dma, l_dma) < 0) { + goto fail; } - return common_is_mac_enabled(osi_dma->base, osi_dma->mac); + ret = common_is_mac_enabled(osi_dma->base, osi_dma->mac); +fail: + return ret; } nve32_t osi_hw_transmit(struct osi_dma_priv_data *osi_dma, nveu32_t chan) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; - if (osi_unlikely(validate_args(osi_dma, l_dma) < 0)) { - return -1; + if (osi_unlikely(dma_validate_args(osi_dma, l_dma) < 0)) { + ret = -1; + goto fail; } if (osi_unlikely(validate_dma_chan_num(osi_dma, chan) < 0)) { - return -1; + ret = -1; + goto fail; } if (osi_unlikely(osi_dma->tx_ring[chan] == OSI_NULL)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA: Invalid Tx ring\n", 0ULL); - return -1; + ret = -1; + goto fail; } - return hw_transmit(osi_dma, osi_dma->tx_ring[chan], l_dma->ops_p, chan); + ret = hw_transmit(osi_dma, osi_dma->tx_ring[chan], chan); +fail: + return ret; } +#ifdef OSI_DEBUG nve32_t osi_dma_ioctl(struct osi_dma_priv_data *osi_dma) { struct dma_local *l_dma = (struct dma_local *)osi_dma; struct osi_dma_ioctl_data *data; - if (osi_unlikely(validate_args(osi_dma, l_dma) < 0)) { + if (osi_unlikely(dma_validate_args(osi_dma, l_dma) < 0)) { return -1; } data = &osi_dma->ioctl_data; switch (data->cmd) { -#ifdef OSI_DEBUG case OSI_DMA_IOCTL_CMD_REG_DUMP: reg_dump(osi_dma); break; case OSI_DMA_IOCTL_CMD_STRUCTS_DUMP: structs_dump(osi_dma); break; -#endif /* OSI_DEBUG */ + case OSI_DMA_IOCTL_CMD_DEBUG_INTR_CONFIG: + l_dma->ops_p->debug_intr_config(osi_dma); + break; default: - 
OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "DMA: Invalid IOCTL command", 0ULL); return -1; } return 0; } +#endif /* OSI_DEBUG */ #ifndef OSI_STRIPPED_LIB @@ -840,7 +1041,7 @@ static inline nve32_t osi_slot_args_validate(struct osi_dma_priv_data *osi_dma, struct dma_local *l_dma, nveu32_t set) { - if (validate_args(osi_dma, l_dma) < 0) { + if (dma_validate_args(osi_dma, l_dma) < 0) { return -1; } @@ -871,7 +1072,7 @@ nve32_t osi_config_slot_function(struct osi_dma_priv_data *osi_dma, chan = osi_dma->dma_chans[i]; if ((chan == 0x0U) || - (chan >= l_dma->max_chans)) { + (chan >= l_dma->num_max_chans)) { /* Ignore 0 and invalid channels */ continue; } @@ -902,17 +1103,6 @@ nve32_t osi_config_slot_function(struct osi_dma_priv_data *osi_dma, return 0; } -nve32_t osi_validate_dma_regs(struct osi_dma_priv_data *osi_dma) -{ - struct dma_local *l_dma = (struct dma_local *)osi_dma; - - if (validate_args(osi_dma, l_dma) < 0) { - return -1; - } - - return l_dma->ops_p->validate_regs(osi_dma); -} - nve32_t osi_txring_empty(struct osi_dma_priv_data *osi_dma, nveu32_t chan) { struct osi_tx_ring *tx_ring = osi_dma->tx_ring[chan]; diff --git a/kernel/nvethernetrm/osi/dma/osi_dma_txrx.c b/kernel/nvethernetrm/osi/dma/osi_dma_txrx.c index 336ed3217b..93c529ebf8 100644 --- a/kernel/nvethernetrm/osi/dma/osi_dma_txrx.c +++ b/kernel/nvethernetrm/osi/dma/osi_dma_txrx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -32,45 +32,6 @@ static struct desc_ops d_ops[MAX_MAC_IP_TYPES]; -/** - * @brief get_rx_err_stats - Detect Errors from Rx Descriptor - * - * @note - * Algorithm: - * - This routine will be invoked by OSI layer itself which - * checks for the Last Descriptor and updates the receive status errors - * accordingly. - * - * @note - * API Group: - * - Initialization: No - * - Run time: Yes - * - De-initialization: No - * - * @param[in] rx_desc: Rx Descriptor. - * @param[in, out] pkt_err_stats: Packet error stats which stores the errors - * reported - */ -static inline void get_rx_err_stats(struct osi_rx_desc *rx_desc, - struct osi_pkt_err_stats *pkt_err_stats) -{ - /* increment rx crc if we see CE bit set */ - if ((rx_desc->rdes3 & RDES3_ERR_CRC) == RDES3_ERR_CRC) { - pkt_err_stats->rx_crc_error = - osi_update_stats_counter( - pkt_err_stats->rx_crc_error, - 1UL); - } - - /* increment rx frame error if we see RE bit set */ - if ((rx_desc->rdes3 & RDES3_ERR_RE) == RDES3_ERR_RE) { - pkt_err_stats->rx_frame_error = - osi_update_stats_counter( - pkt_err_stats->rx_frame_error, - 1UL); - } -} - /** * @brief validate_rx_completions_arg- Validate input argument of rx_completions * @@ -97,34 +58,39 @@ static inline void get_rx_err_stats(struct osi_rx_desc *rx_desc, static inline nve32_t validate_rx_completions_arg( struct osi_dma_priv_data *osi_dma, nveu32_t chan, - nveu32_t *more_data_avail, + const nveu32_t *const more_data_avail, struct osi_rx_ring **rx_ring, struct osi_rx_pkt_cx **rx_pkt_cx) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; if (osi_unlikely((osi_dma == OSI_NULL) || (more_data_avail == OSI_NULL) || - (chan >= l_dma->max_chans))) { - return -1; + (chan >= l_dma->num_max_chans))) { + ret = -1; + goto fail; } 
*rx_ring = osi_dma->rx_ring[chan]; if (osi_unlikely(*rx_ring == OSI_NULL)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "validate_input_rx_completions: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } *rx_pkt_cx = &(*rx_ring)->rx_pkt_cx; if (osi_unlikely(*rx_pkt_cx == OSI_NULL)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "validate_input_rx_completions: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } - return 0; +fail: + return ret; } nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, @@ -139,34 +105,42 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, struct osi_rx_desc *context_desc = OSI_NULL; nveu32_t ip_type = osi_dma->mac; nve32_t received = 0; +#ifndef OSI_STRIPPED_LIB nve32_t received_resv = 0; +#endif /* !OSI_STRIPPED_LIB */ nve32_t ret = 0; ret = validate_rx_completions_arg(osi_dma, chan, more_data_avail, &rx_ring, &rx_pkt_cx); if (osi_unlikely(ret < 0)) { - return ret; + received = -1; + goto fail; } if (rx_ring->cur_rx_idx >= osi_dma->rx_ring_sz) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid cur_rx_idx\n", 0ULL); - return -1; + received = -1; + goto fail; } /* Reset flag to indicate if more Rx frames available to OSD layer */ *more_data_avail = OSI_NONE; - while ((received < budget) && (received_resv < budget)) { - osi_memset(rx_pkt_cx, 0U, sizeof(*rx_pkt_cx)); + while ((received < budget) +#ifndef OSI_STRIPPED_LIB + && (received_resv < budget) +#endif /* !OSI_STRIPPED_LIB */ + ) { rx_desc = rx_ring->rx_desc + rx_ring->cur_rx_idx; - rx_swcx = rx_ring->rx_swcx + rx_ring->cur_rx_idx; /* check for data availability */ if ((rx_desc->rdes3 & RDES3_OWN) == RDES3_OWN) { break; } -#ifdef OSI_DEBUG + rx_swcx = rx_ring->rx_swcx + rx_ring->cur_rx_idx; + osi_memset(rx_pkt_cx, 0U, sizeof(*rx_pkt_cx)); +#if defined 
OSI_DEBUG && !defined OSI_STRIPPED_LIB if (osi_dma->enable_desc_dump == 1U) { desc_dump(osi_dma, rx_ring->cur_rx_idx, rx_ring->cur_rx_idx, RX_DESC_DUMP, chan); @@ -175,6 +149,7 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, INCR_RX_DESC_INDEX(rx_ring->cur_rx_idx, osi_dma->rx_ring_sz); +#ifndef OSI_STRIPPED_LIB if (osi_unlikely(rx_swcx->buf_virt_addr == osi_dma->resv_buf_virt_addr)) { rx_swcx->buf_virt_addr = OSI_NULL; @@ -187,6 +162,7 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, } continue; } +#endif /* !OSI_STRIPPED_LIB */ /* packet already processed */ if ((rx_swcx->flags & OSI_RX_SWCX_PROCESSED) == @@ -227,19 +203,22 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, * are set */ rx_pkt_cx->flags &= ~OSI_PKT_CX_VALID; +#ifndef OSI_STRIPPED_LIB d_ops[ip_type].update_rx_err_stats(rx_desc, &osi_dma->pkt_err_stats); +#endif /* !OSI_STRIPPED_LIB */ } /* Check if COE Rx checksum is valid */ d_ops[ip_type].get_rx_csum(rx_desc, rx_pkt_cx); +#ifndef OSI_STRIPPED_LIB /* Get Rx VLAN from descriptor */ d_ops[ip_type].get_rx_vlan(rx_desc, rx_pkt_cx); /* get_rx_hash for RSS */ d_ops[ip_type].get_rx_hash(rx_desc, rx_pkt_cx); - +#endif /* !OSI_STRIPPED_LIB */ context_desc = rx_ring->rx_desc + rx_ring->cur_rx_idx; /* Get rx time stamp */ ret = d_ops[ip_type].get_rx_hwstamp(osi_dma, rx_desc, @@ -273,21 +252,25 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, osi_dma->rx_buf_len, rx_pkt_cx, rx_swcx); } else { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid function pointer\n", 0ULL); - return -1; + received = -1; + goto fail; } } +#ifndef OSI_STRIPPED_LIB osi_dma->dstats.q_rx_pkt_n[chan] = osi_update_stats_counter( osi_dma->dstats.q_rx_pkt_n[chan], 1UL); osi_dma->dstats.rx_pkt_n = osi_update_stats_counter(osi_dma->dstats.rx_pkt_n, 1UL); +#endif /* !OSI_STRIPPED_LIB */ received++; } +#ifndef OSI_STRIPPED_LIB /* If 
budget is done, check if HW ring still has unprocessed * Rx packets, so that the OSD layer can decide to schedule * this function again. @@ -304,10 +287,13 @@ nve32_t osi_process_rx_completions(struct osi_dma_priv_data *osi_dma, *more_data_avail = OSI_ENABLE; } } +#endif /* !OSI_STRIPPED_LIB */ +fail: return received; } +#ifndef OSI_STRIPPED_LIB /** * @brief inc_tx_pkt_stats - Increment Tx packet count Stats * @@ -437,7 +423,6 @@ static inline void get_tx_err_stats(struct osi_tx_desc *tx_desc, } } -#ifndef OSI_STRIPPED_LIB nve32_t osi_clear_tx_pkt_err_stats(struct osi_dma_priv_data *osi_dma) { nve32_t ret = -1; @@ -509,23 +494,26 @@ static inline nve32_t validate_tx_completions_arg( nveu32_t chan, struct osi_tx_ring **tx_ring) { - struct dma_local *l_dma = (struct dma_local *)osi_dma; + const struct dma_local *const l_dma = (struct dma_local *)(void *)osi_dma; + nve32_t ret = 0; if (osi_unlikely((osi_dma == OSI_NULL) || - (chan >= l_dma->max_chans))) { - return -1; + (chan >= l_dma->num_max_chans))) { + ret = -1; + goto fail; } *tx_ring = osi_dma->tx_ring[chan]; if (osi_unlikely(*tx_ring == OSI_NULL)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "validate_tx_completions_arg: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } - - return 0; +fail: + return ret; } /** @@ -538,15 +526,15 @@ static inline nve32_t validate_tx_completions_arg( * @retval 1 if condition is true * @retval 0 if condition is false. */ -static inline unsigned int is_ptp_twostep_or_slave_mode(unsigned int ptp_flag) +static inline nveu32_t is_ptp_twostep_or_slave_mode(nveu32_t ptp_flag) { return (((ptp_flag & OSI_PTP_SYNC_SLAVE) == OSI_PTP_SYNC_SLAVE) || ((ptp_flag & OSI_PTP_SYNC_TWOSTEP) == OSI_PTP_SYNC_TWOSTEP)) ? 
OSI_ENABLE : OSI_DISABLE; } -int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, - unsigned int chan, int budget) +nve32_t osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, + nveu32_t chan, nve32_t budget) { struct osi_tx_ring *tx_ring = OSI_NULL; struct osi_txdone_pkt_cx *txdone_pkt_cx = OSI_NULL; @@ -560,15 +548,17 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, ret = validate_tx_completions_arg(osi_dma, chan, &tx_ring); if (osi_unlikely(ret < 0)) { - return ret; + processed = -1; + goto fail; } txdone_pkt_cx = &tx_ring->txdone_pkt_cx; entry = tx_ring->clean_idx; +#ifndef OSI_STRIPPED_LIB osi_dma->dstats.tx_clean_n[chan] = osi_update_stats_counter(osi_dma->dstats.tx_clean_n[chan], 1U); - +#endif /* !OSI_STRIPPED_LIB */ while ((entry != tx_ring->cur_tx_idx) && (entry < osi_dma->tx_ring_sz) && (processed < budget)) { osi_memset(txdone_pkt_cx, 0U, sizeof(*txdone_pkt_cx)); @@ -592,11 +582,15 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, if (((tx_desc->tdes3 & TDES3_ES_BITS) != 0U) && (osi_dma->mac != OSI_MAC_HW_MGBE)) { txdone_pkt_cx->flags |= OSI_TXDONE_CX_ERROR; +#ifndef OSI_STRIPPED_LIB /* fill packet error stats */ get_tx_err_stats(tx_desc, &osi_dma->pkt_err_stats); +#endif /* !OSI_STRIPPED_LIB */ } else { +#ifndef OSI_STRIPPED_LIB inc_tx_pkt_stats(osi_dma, chan); +#endif /* !OSI_STRIPPED_LIB */ } if (processed < INT_MAX) { @@ -659,10 +653,11 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, tx_swcx, txdone_pkt_cx); } else { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid function pointer\n", 0ULL); - return -1; + processed = -1; + goto fail; } tx_desc->tdes3 = 0; @@ -674,6 +669,7 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, tx_swcx->buf_virt_addr = OSI_NULL; tx_swcx->buf_phy_addr = 0; tx_swcx->flags = 0; + tx_swcx->data_idx = 0; INCR_TX_DESC_INDEX(entry, osi_dma->tx_ring_sz); /* Don't 
wait to update tx_ring->clean-idx. It will @@ -684,6 +680,7 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, tx_ring->clean_idx = entry; } +fail: return processed; } @@ -712,18 +709,17 @@ int osi_process_tx_completions(struct osi_dma_priv_data *osi_dma, * @retval 1 - cntx desc used. */ -static inline nve32_t need_cntx_desc(struct osi_tx_pkt_cx *tx_pkt_cx, - struct osi_tx_swcx *tx_swcx, - struct osi_tx_desc *tx_desc, - unsigned int ptp_sync_flag, - unsigned int mac) +static inline nve32_t need_cntx_desc(const struct osi_tx_pkt_cx *const tx_pkt_cx, + struct osi_tx_swcx *tx_swcx, + struct osi_tx_desc *tx_desc, + nveu32_t ptp_sync_flag, + nveu32_t mac) { nve32_t ret = 0; if (((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) || ((tx_pkt_cx->flags & OSI_PKT_CX_TSO) == OSI_PKT_CX_TSO) || ((tx_pkt_cx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP)) { - if ((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) { /* Set context type */ tx_desc->tdes3 |= TDES3_CTXT; @@ -750,24 +746,22 @@ static inline nve32_t need_cntx_desc(struct osi_tx_pkt_cx *tx_pkt_cx, /* This part of code must be at the end of function */ if ((tx_pkt_cx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP) { - if ((mac == OSI_MAC_HW_EQOS) && - ((ptp_sync_flag & OSI_PTP_SYNC_TWOSTEP) == - OSI_PTP_SYNC_TWOSTEP)){ - /* return the current ret value */ - return ret; - } + if (((mac == OSI_MAC_HW_EQOS) && + ((ptp_sync_flag & OSI_PTP_SYNC_TWOSTEP) == OSI_PTP_SYNC_TWOSTEP))) { + /* Doing nothing */ + } else { + /* Set context type */ + tx_desc->tdes3 |= TDES3_CTXT; + /* in case of One-step sync */ + if ((ptp_sync_flag & OSI_PTP_SYNC_ONESTEP) == + OSI_PTP_SYNC_ONESTEP) { + /* Set TDES3_OSTC */ + tx_desc->tdes3 |= TDES3_OSTC; + tx_desc->tdes3 &= ~TDES3_TCMSSV; + } - /* Set context type */ - tx_desc->tdes3 |= TDES3_CTXT; - /* in case of One-step sync */ - if ((ptp_sync_flag & OSI_PTP_SYNC_ONESTEP) == - OSI_PTP_SYNC_ONESTEP) { - /* Set TDES3_OSTC */ - tx_desc->tdes3 |= TDES3_OSTC; - tx_desc->tdes3 
&= ~TDES3_TCMSSV; + ret = 1; } - - ret = 1; } } @@ -784,7 +778,7 @@ static inline nve32_t need_cntx_desc(struct osi_tx_pkt_cx *tx_pkt_cx, * @retval 1 if condition is true * @retval 0 if condition is false. */ -static inline unsigned int is_ptp_onestep_and_master_mode(unsigned int ptp_flag) +static inline nveu32_t is_ptp_onestep_and_master_mode(nveu32_t ptp_flag) { return (((ptp_flag & OSI_PTP_SYNC_MASTER) == OSI_PTP_SYNC_MASTER) && ((ptp_flag & OSI_PTP_SYNC_ONESTEP) == OSI_PTP_SYNC_ONESTEP)) ? @@ -813,11 +807,19 @@ static inline unsigned int is_ptp_onestep_and_master_mode(unsigned int ptp_flag) * @param[in, out] tx_desc: Pointer to transmit descriptor to be filled. * @param[in] tx_swcx: Pointer to corresponding tx descriptor software context. */ +#ifndef OSI_STRIPPED_LIB static inline void fill_first_desc(struct osi_tx_ring *tx_ring, struct osi_tx_pkt_cx *tx_pkt_cx, struct osi_tx_desc *tx_desc, struct osi_tx_swcx *tx_swcx, - unsigned int ptp_flag) + nveu32_t ptp_flag) +#else +static inline void fill_first_desc(OSI_UNUSED struct osi_tx_ring *tx_ring, + struct osi_tx_pkt_cx *tx_pkt_cx, + struct osi_tx_desc *tx_desc, + struct osi_tx_swcx *tx_swcx, + nveu32_t ptp_flag) +#endif /* !OSI_STRIPPED_LIB */ { tx_desc->tdes0 = L32(tx_swcx->buf_phy_addr); tx_desc->tdes1 = H32(tx_swcx->buf_phy_addr); @@ -876,6 +878,7 @@ static inline void fill_first_desc(struct osi_tx_ring *tx_ring, tx_desc->tdes3 &= ~TDES3_TPL_MASK; tx_desc->tdes3 |= tx_pkt_cx->payload_len; } else { +#ifndef OSI_STRIPPED_LIB if ((tx_ring->slot_check == OSI_ENABLE) && (tx_ring->slot_number < OSI_SLOT_NUM_MAX)) { /* Fill Slot number */ @@ -884,6 +887,7 @@ static inline void fill_first_desc(struct osi_tx_ring *tx_ring, tx_ring->slot_number = ((tx_ring->slot_number + 1U) % OSI_SLOT_NUM_MAX); } +#endif /* !OSI_STRIPPED_LIB */ } } @@ -921,55 +925,64 @@ static inline void dmb_oshst(void) * @retval 0 on success * @retval -1 on failure. 
*/ -static inline nve32_t validate_ctx(struct osi_dma_priv_data *osi_dma, - struct osi_tx_pkt_cx *tx_pkt_cx) +static inline nve32_t validate_ctx(const struct osi_dma_priv_data *const osi_dma, + const struct osi_tx_pkt_cx *const tx_pkt_cx) { + nve32_t ret = 0; + + (void) osi_dma; if ((tx_pkt_cx->flags & OSI_PKT_CX_TSO) == OSI_PKT_CX_TSO) { if (osi_unlikely((tx_pkt_cx->tcp_udp_hdrlen / OSI_TSO_HDR_LEN_DIVISOR) > TDES3_THL_MASK)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid TSO header len\n", (nveul64_t)tx_pkt_cx->tcp_udp_hdrlen); + ret = -1; goto fail; } else if (osi_unlikely(tx_pkt_cx->payload_len > TDES3_TPL_MASK)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid TSO payload len\n", (nveul64_t)tx_pkt_cx->payload_len); + ret = -1; goto fail; } else if (osi_unlikely(tx_pkt_cx->mss > TDES2_MSS_MASK)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid MSS\n", (nveul64_t)tx_pkt_cx->mss); + ret = -1; goto fail; + } else { + /* empty statement */ } } else if ((tx_pkt_cx->flags & OSI_PKT_CX_LEN) == OSI_PKT_CX_LEN) { if (osi_unlikely(tx_pkt_cx->payload_len > TDES3_PL_MASK)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid frame len\n", (nveul64_t)tx_pkt_cx->payload_len); + ret = -1; goto fail; } + } else { + /* empty statement */ } if (osi_unlikely(tx_pkt_cx->vtag_id > TDES3_VT_MASK)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid VTAG_ID\n", (nveul64_t)tx_pkt_cx->vtag_id); - goto fail; + ret = -1; } - return 0; fail: - return -1; + return ret; } nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, struct osi_tx_ring *tx_ring, - struct dma_chan_ops *ops, - nveu32_t chan) + nveu32_t dma_chan) { - struct dma_local *l_dma = (struct dma_local 
*)osi_dma; + struct dma_local *l_dma = (struct dma_local *)(void *)osi_dma; struct osi_tx_pkt_cx *tx_pkt_cx = OSI_NULL; struct osi_tx_desc *first_desc = OSI_NULL; struct osi_tx_desc *last_desc = OSI_NULL; @@ -980,18 +993,25 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, nveu32_t f_idx = tx_ring->cur_tx_idx; nveu32_t l_idx = 0; #endif /* OSI_DEBUG */ + nveu32_t chan = dma_chan & 0xFU; + const nveu32_t tail_ptr_reg[2] = { + EQOS_DMA_CHX_TDTP(chan), + MGBE_DMA_CHX_TDTLP(chan) + }; nve32_t cntx_desc_consumed; nveu32_t pkt_id = 0x0U; nveu32_t desc_cnt = 0U; nveu64_t tailptr; nveu32_t entry = 0U; + nve32_t ret = 0; nveu32_t i; entry = tx_ring->cur_tx_idx; if (entry >= osi_dma->tx_ring_sz) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid cur_tx_idx\n", 0ULL); - return -1; + ret = -1; + goto fail; } tx_desc = tx_ring->tx_desc + entry; @@ -1001,15 +1021,18 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, desc_cnt = tx_pkt_cx->desc_cnt; if (osi_unlikely(desc_cnt == 0U)) { /* Will not hit this case */ - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid desc_cnt\n", 0ULL); - return -1; + ret = -1; + goto fail; } if (validate_ctx(osi_dma, tx_pkt_cx) < 0) { - return -1; + ret = -1; + goto fail; } +#ifndef OSI_STRIPPED_LIB /* Context descriptor for VLAN/TSO */ if ((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) { osi_dma->dstats.tx_vlan_pkt_n = @@ -1022,6 +1045,7 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, osi_update_stats_counter(osi_dma->dstats.tx_tso_pkt_n, 1UL); } +#endif /* !OSI_STRIPPED_LIB */ cntx_desc_consumed = need_cntx_desc(tx_pkt_cx, tx_swcx, tx_desc, osi_dma->ptp_flag, osi_dma->mac); @@ -1124,7 +1148,9 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, * We need to make sure Tx descriptor updated above is really updated * before setting up the DMA, hence add memory write barrier here. 
*/ - dmb_oshst(); + if (tx_ring->skip_dmb == 0U) { + dmb_oshst(); + } #ifdef OSI_DEBUG if (osi_dma->enable_desc_dump == 1U) { @@ -1138,9 +1164,10 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, (entry * sizeof(struct osi_tx_desc)); if (osi_unlikely(tailptr < tx_ring->tx_desc_phy_addr)) { /* Will not hit this case */ - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid tx_desc_phy_addr\n", 0ULL); - return -1; + ret = -1; + goto fail; } /* @@ -1149,9 +1176,11 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, */ tx_ring->cur_tx_idx = entry; - ops->update_tx_tailptr(osi_dma->base, chan, tailptr); + /* Update the Tx tail pointer */ + osi_writel(L32(tailptr), (nveu8_t *)osi_dma->base + tail_ptr_reg[osi_dma->mac]); - return 0; +fail: + return ret; } /** @@ -1176,22 +1205,37 @@ nve32_t hw_transmit(struct osi_dma_priv_data *osi_dma, * @retval 0 on success * @retval -1 on failure. */ -static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma, - nveu32_t chan, - struct dma_chan_ops *ops) +static nve32_t rx_dma_desc_initialization(const struct osi_dma_priv_data *const osi_dma, + nveu32_t dma_chan) { + nveu32_t chan = dma_chan & 0xFU; + const nveu32_t start_addr_high_reg[2] = { + EQOS_DMA_CHX_RDLH(chan), + MGBE_DMA_CHX_RDLH(chan) + }; + const nveu32_t start_addr_low_reg[2] = { + EQOS_DMA_CHX_RDLA(chan), + MGBE_DMA_CHX_RDLA(chan) + }; + const nveu32_t ring_len_reg[2] = { + EQOS_DMA_CHX_RDRL(chan), + MGBE_DMA_CHX_RX_CNTRL2(chan) + }; + const nveu32_t mask[2] = { 0x3FFU, 0x3FFFU }; struct osi_rx_ring *rx_ring = OSI_NULL; struct osi_rx_desc *rx_desc = OSI_NULL; struct osi_rx_swcx *rx_swcx = OSI_NULL; nveu64_t tailptr = 0; - nveu32_t i; nve32_t ret = 0; + nveu32_t val; + nveu32_t i; rx_ring = osi_dma->rx_ring[chan]; if (osi_unlikely(rx_ring == OSI_NULL)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid argument\n", 0ULL); - 
return -1; + ret = -1; + goto fail; }; rx_ring->cur_rx_idx = 0; @@ -1239,16 +1283,26 @@ static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma, if (osi_unlikely((tailptr < rx_ring->rx_desc_phy_addr))) { /* Will not hit this case */ - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid phys address\n", 0ULL); - return -1; + ret = -1; + goto fail; } - ops->set_rx_ring_len(osi_dma, chan, (osi_dma->rx_ring_sz - 1U)); - ops->update_rx_tailptr(osi_dma->base, chan, tailptr); - ops->set_rx_ring_start_addr(osi_dma->base, chan, - rx_ring->rx_desc_phy_addr); + /* Update the HW DMA ring length */ + val = osi_readl((nveu8_t *)osi_dma->base + ring_len_reg[osi_dma->mac]); + val |= (osi_dma->rx_ring_sz - 1U) & mask[osi_dma->mac]; + osi_writel(val, (nveu8_t *)osi_dma->base + ring_len_reg[osi_dma->mac]); + + update_rx_tail_ptr(osi_dma, chan, tailptr); + + /* Program Ring start address */ + osi_writel(H32(rx_ring->rx_desc_phy_addr), + (nveu8_t *)osi_dma->base + start_addr_high_reg[osi_dma->mac]); + osi_writel(L32(rx_ring->rx_desc_phy_addr), + (nveu8_t *)osi_dma->base + start_addr_low_reg[osi_dma->mac]); +fail: return ret; } @@ -1273,25 +1327,58 @@ static nve32_t rx_dma_desc_initialization(struct osi_dma_priv_data *osi_dma, * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t rx_dma_desc_init(struct osi_dma_priv_data *osi_dma, - struct dma_chan_ops *ops) +static nve32_t rx_dma_desc_init(struct osi_dma_priv_data *osi_dma) { nveu32_t chan = 0; - nveu32_t i; nve32_t ret = 0; + nveu32_t i; for (i = 0; i < osi_dma->num_dma_chans; i++) { chan = osi_dma->dma_chans[i]; - ret = rx_dma_desc_initialization(osi_dma, chan, ops); + ret = rx_dma_desc_initialization(osi_dma, chan); if (ret != 0) { - return ret; + goto fail; } } +fail: return ret; } +static inline void set_tx_ring_len_and_start_addr(const struct osi_dma_priv_data *const osi_dma, + nveu64_t tx_desc_phy_addr, + nveu32_t dma_chan, + nveu32_t len) +{ + nveu32_t chan = dma_chan & 0xFU; + const nveu32_t ring_len_reg[2] = { + EQOS_DMA_CHX_TDRL(chan), + MGBE_DMA_CHX_TX_CNTRL2(chan) + }; + const nveu32_t start_addr_high_reg[2] = { + EQOS_DMA_CHX_TDLH(chan), + MGBE_DMA_CHX_TDLH(chan) + }; + const nveu32_t start_addr_low_reg[2] = { + EQOS_DMA_CHX_TDLA(chan), + MGBE_DMA_CHX_TDLA(chan) + }; + const nveu32_t mask[2] = { 0x3FFU, 0x3FFFU }; + nveu32_t val; + + /* Program ring length */ + val = osi_readl((nveu8_t *)osi_dma->base + ring_len_reg[osi_dma->mac]); + val |= len & mask[osi_dma->mac]; + osi_writel(val, (nveu8_t *)osi_dma->base + ring_len_reg[osi_dma->mac]); + + /* Program tx ring start address */ + osi_writel(H32(tx_desc_phy_addr), + (nveu8_t *)osi_dma->base + start_addr_high_reg[osi_dma->mac]); + osi_writel(L32(tx_desc_phy_addr), + (nveu8_t *)osi_dma->base + start_addr_low_reg[osi_dma->mac]); +} + /** * @brief tx_dma_desc_init - Initialize DMA Transmit descriptors. * @@ -1312,13 +1399,13 @@ static nve32_t rx_dma_desc_init(struct osi_dma_priv_data *osi_dma, * @retval 0 on success * @retval -1 on failure. 
*/ -static nve32_t tx_dma_desc_init(struct osi_dma_priv_data *osi_dma, - struct dma_chan_ops *ops) +static nve32_t tx_dma_desc_init(const struct osi_dma_priv_data *const osi_dma) { struct osi_tx_ring *tx_ring = OSI_NULL; struct osi_tx_desc *tx_desc = OSI_NULL; struct osi_tx_swcx *tx_swcx = OSI_NULL; nveu32_t chan = 0; + nve32_t ret = 0; nveu32_t i, j; for (i = 0; i < osi_dma->num_dma_chans; i++) { @@ -1326,9 +1413,10 @@ static nve32_t tx_dma_desc_init(struct osi_dma_priv_data *osi_dma, tx_ring = osi_dma->tx_ring[chan]; if (osi_unlikely(tx_ring == OSI_NULL)) { - OSI_DMA_ERR(OSI_NULL, OSI_LOG_ARG_INVALID, + OSI_DMA_ERR(osi_dma->osd, OSI_LOG_ARG_INVALID, "dma_txrx: Invalid pointers\n", 0ULL); - return -1; + ret = -1; + goto fail; } for (j = 0; j < osi_dma->tx_ring_sz; j++) { @@ -1349,46 +1437,47 @@ static nve32_t tx_dma_desc_init(struct osi_dma_priv_data *osi_dma, tx_ring->cur_tx_idx = 0; tx_ring->clean_idx = 0; +#ifndef OSI_STRIPPED_LIB /* Slot function parameter initialization */ tx_ring->slot_number = 0U; tx_ring->slot_check = OSI_DISABLE; +#endif /* !OSI_STRIPPED_LIB */ - ops->set_tx_ring_len(osi_dma, chan, - (osi_dma->tx_ring_sz - 1U)); - ops->set_tx_ring_start_addr(osi_dma->base, chan, - tx_ring->tx_desc_phy_addr); + set_tx_ring_len_and_start_addr(osi_dma, tx_ring->tx_desc_phy_addr, + chan, (osi_dma->tx_ring_sz - 1U)); } - return 0; +fail: + return ret; } -nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma, - struct dma_chan_ops *ops) +nve32_t dma_desc_init(struct osi_dma_priv_data *osi_dma) { nve32_t ret = 0; - ret = tx_dma_desc_init(osi_dma, ops); + ret = tx_dma_desc_init(osi_dma); if (ret != 0) { - return ret; + goto fail; } - ret = rx_dma_desc_init(osi_dma, ops); + ret = rx_dma_desc_init(osi_dma); if (ret != 0) { - return ret; + goto fail; } +fail: return ret; } -nve32_t init_desc_ops(struct osi_dma_priv_data *osi_dma) +nve32_t init_desc_ops(const struct osi_dma_priv_data *const osi_dma) { - typedef void (*desc_ops_arr)(struct desc_ops *); + typedef 
void (*desc_ops_arr)(struct desc_ops *p_ops); - desc_ops_arr desc_ops[2] = { + const desc_ops_arr desc_ops_a[2] = { eqos_init_desc_ops, mgbe_init_desc_ops }; - desc_ops[osi_dma->mac](&d_ops[osi_dma->mac]); + desc_ops_a[osi_dma->mac](&d_ops[osi_dma->mac]); /* TODO: validate function pointers */ return 0; diff --git a/kernel/nvethernetrm/osi/dma/staticlib/Makefile.interface.tmk b/kernel/nvethernetrm/osi/dma/staticlib/Makefile.interface.tmk new file mode 100644 index 0000000000..c17d0bb0f7 --- /dev/null +++ b/kernel/nvethernetrm/osi/dma/staticlib/Makefile.interface.tmk @@ -0,0 +1,38 @@ +################################### tell Emacs this is a -*- makefile-gmake -*- +# +# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+# +# libnvethernetcl interface makefile fragment +# +############################################################################### + +ifdef NV_INTERFACE_FLAG_STATIC_LIBRARY_SECTION +NV_INTERFACE_NAME := nvethernetcl +NV_INTERFACE_COMPONENT_DIR := . +NV_INTERFACE_PUBLIC_INCLUDES := \ + ./include +endif + +# Local Variables: +# indent-tabs-mode: t +# tab-width: 8 +# End: +# vi: set tabstop=8 noexpandtab: diff --git a/kernel/nvethernetrm/osi/dma/staticlib/Makefile.tmk b/kernel/nvethernetrm/osi/dma/staticlib/Makefile.tmk new file mode 100644 index 0000000000..04291430f0 --- /dev/null +++ b/kernel/nvethernetrm/osi/dma/staticlib/Makefile.tmk @@ -0,0 +1,54 @@ +################################### tell Emacs this is a -*- makefile-gmake -*- +# +# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+# +############################################################################### + +ifdef NV_COMPONENT_FLAG_STATIC_LIBRARY_SECTION +include $(NV_BUILD_START_COMPONENT) + +NV_COMPONENT_STRICT_WARNINGS_qnx_64 := 1 + +NV_COMPONENT_NAME := nvethernetcl +NV_COMPONENT_OWN_INTERFACE_DIR := . +NV_COMPONENT_SOURCES := \ + $(NV_SOURCE)/nvethernetrm/osi/dma/eqos_dma.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/osi_dma.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/osi_dma_txrx.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/mgbe_dma.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/eqos_desc.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/mgbe_desc.c \ + $(NV_SOURCE)/nvethernetrm/osi/dma/debug.c \ + $(NV_SOURCE)/nvethernetrm/osi/common/osi_common.c \ + $(NV_SOURCE)/nvethernetrm/osi/common/eqos_common.c \ + $(NV_SOURCE)/nvethernetrm/osi/common/mgbe_common.c + +NV_COMPONENT_INCLUDES := \ + $(NV_SOURCE)/nvethernetrm/include \ + $(NV_SOURCE)/nvethernetrm/osi/common/include + +ifeq ($(NV_BUILD_CONFIGURATION_IS_SAFETY),0) + NV_COMPONENT_CFLAGS += -DOSI_DEBUG +else + NV_COMPONENT_CFLAGS += -DOSI_STRIPPED_LIB +endif +include $(NV_BUILD_STATIC_LIBRARY) +endif diff --git a/kernel/nvgpu/drivers/gpu/nvgpu/os/linux/ioctl.c b/kernel/nvgpu/drivers/gpu/nvgpu/os/linux/ioctl.c index 5539d835c9..44b61ca08a 100644 --- a/kernel/nvgpu/drivers/gpu/nvgpu/os/linux/ioctl.c +++ b/kernel/nvgpu/drivers/gpu/nvgpu/os/linux/ioctl.c @@ -211,7 +211,11 @@ static char *nvgpu_devnode(const char *cdev_name) return kasprintf(GFP_KERNEL, "nvhost-%s-gpu", cdev_name); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)) static char *nvgpu_pci_devnode(struct device *dev, umode_t *mode) +#else +static char *nvgpu_pci_devnode(const struct device *dev, umode_t *mode) +#endif { /* Special case to maintain legacy names */ if (strcmp(dev_name(dev), "channel") == 0) { @@ -223,18 +227,30 @@ static char *nvgpu_pci_devnode(struct device *dev, umode_t *mode) dev_name(dev->parent), dev_name(dev)); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)) static char 
*nvgpu_devnode_v2(struct device *dev, umode_t *mode) +#else +static char *nvgpu_devnode_v2(const struct device *dev, umode_t *mode) +#endif { return kasprintf(GFP_KERNEL, "nvgpu/igpu0/%s", dev_name(dev)); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)) static char *nvgpu_pci_devnode_v2(struct device *dev, umode_t *mode) +#else +static char *nvgpu_pci_devnode_v2(const struct device *dev, umode_t *mode) +#endif { return kasprintf(GFP_KERNEL, "nvgpu/dgpu-%s/%s", dev_name(dev->parent), dev_name(dev)); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)) static char *nvgpu_mig_fgpu_devnode(struct device *dev, umode_t *mode) +#else +static char *nvgpu_mig_fgpu_devnode(const struct device *dev, umode_t *mode) +#endif { struct nvgpu_cdev_class_priv_data *priv_data; diff --git a/kernel/nvgpu/drivers/gpu/nvgpu/os/linux/module.c b/kernel/nvgpu/drivers/gpu/nvgpu/os/linux/module.c index 1f94bd99d1..1e322344d3 100644 --- a/kernel/nvgpu/drivers/gpu/nvgpu/os/linux/module.c +++ b/kernel/nvgpu/drivers/gpu/nvgpu/os/linux/module.c @@ -1,7 +1,7 @@ /* * GK20A Graphics * - * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -153,8 +153,17 @@ static int nvgpu_kernel_shutdown_notification(struct notifier_block *nb, struct nvgpu_gr *gr = nvgpu_gr_get_cur_instance_ptr(g); nvgpu_set_enabled(g, NVGPU_KERNEL_IS_DYING, true); - /* signal the gr wait */ - nvgpu_cond_signal(&gr->init_wq); + + /* + * In rmmod path, when the kernel or GPU driver is + * dying signal the gr wait queue so that the wait + * queue wakes up and further processing happens. + * This is needed to prevent other threads, like + * pmu_pg_task, to go into un-interruptible state. 
+ */ + if (gr != NULL) { + nvgpu_cond_signal(&gr->init_wq); + } return NOTIFY_DONE; } @@ -1589,9 +1598,16 @@ void nvgpu_start_gpu_idle(struct gk20a *g) down_write(&l->busy_lock); nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true); - - /* signal the gr wait */ - nvgpu_cond_signal(&gr->init_wq); + /* + * In rmmod path, when the kernel or GPU driver is + * dying signal the gr wait queue so that the wait + * queue wakes up and further processing happens. + * This is needed to prevent other threads, like + * pmu_pg_task, to go into un-interruptible state. + */ + if (gr != NULL) { + nvgpu_cond_signal(&gr->init_wq); + } /* * GR SW ready needs to be invalidated at this time with the busy lock * held to prevent a racing condition on the gr/mm code diff --git a/kernel/nvidia/drivers/Makefile b/kernel/nvidia/drivers/Makefile index a0a9038c50..3196488919 100644 --- a/kernel/nvidia/drivers/Makefile +++ b/kernel/nvidia/drivers/Makefile @@ -1,4 +1,4 @@ -# Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -12,6 +12,7 @@ subdir-ccflags-y += -Werror obj-$(CONFIG_RTK_BTUSB) += bluetooth/realtek/ +obj-$(CONFIG_HWMON) += hwmon/ obj-$(CONFIG_NVPMODEL_EMC) += nvpmodel/ obj-$(CONFIG_TEGRA_RDMA) += nv-p2p/ obj-$(CONFIG_NVPPS) += nvpps/ diff --git a/kernel/nvidia/drivers/crypto/tegra-se-nvrng.c b/kernel/nvidia/drivers/crypto/tegra-se-nvrng.c index 5dee64aa9c..881b504440 100644 --- a/kernel/nvidia/drivers/crypto/tegra-se-nvrng.c +++ b/kernel/nvidia/drivers/crypto/tegra-se-nvrng.c @@ -3,7 +3,7 @@ * * Support for Tegra NVRNG Engine Error Handling. * - * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2017-2023, NVIDIA Corporation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -232,7 +232,7 @@ static int tegra_se_sc7_check_error(struct tegra_se_nvrng_dev *nvrng_dev, ret = tegra_se_sc7_check_idle(nvrng_dev, SC7_IDLE_TIMEOUT_200MS); if (ret == -ETIMEDOUT) { - pr_err("%s:%d SE HW is not idle, timeout\n", + pr_info("%s:%d SE HW is not idle, timeout\n", __func__, __LINE__); return ret; } diff --git a/kernel/nvidia/drivers/devfreq/governor_v2.h b/kernel/nvidia/drivers/devfreq/governor_v2.h index 0f705b908a..ab39a8619f 100644 --- a/kernel/nvidia/drivers/devfreq/governor_v2.h +++ b/kernel/nvidia/drivers/devfreq/governor_v2.h @@ -8,7 +8,7 @@ * * This header is for devfreq governors in drivers/devfreq/ */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0) && LINUX_VERSION_CODE <= KERNEL_VERSION(6, 0, 0) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0) #ifndef _GOVERNOR_H #define _GOVERNOR_H #include @@ -97,4 +97,4 @@ static inline int devfreq_update_stats(struct devfreq *df) return df->profile->get_dev_status(df->dev.parent, &df->last_status); } #endif /* _GOVERNOR_H */ -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0) && LINUX_VERSION_CODE <= KERNEL_VERSION(5, 19, 0) */ +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0) */ diff --git a/kernel/nvidia/drivers/gpu/drm/tegra/fb.c b/kernel/nvidia/drivers/gpu/drm/tegra/fb.c index 69d486aa2c..da6253757d 100644 --- a/kernel/nvidia/drivers/gpu/drm/tegra/fb.c +++ b/kernel/nvidia/drivers/gpu/drm/tegra/fb.c @@ -248,7 +248,11 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, if (IS_ERR(bo)) return PTR_ERR(bo); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 2, 0) + info = drm_fb_helper_alloc_info(helper); +#else info = drm_fb_helper_alloc_fbi(helper); +#endif if (IS_ERR(info)) { dev_err(drm->dev, "failed to allocate framebuffer info\n"); 
drm_gem_object_put(&bo->gem); @@ -266,7 +270,11 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, fb = fbdev->fb; helper->fb = fb; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 2, 0) + helper->info = info; +#else helper->fbdev = info; +#endif info->fbops = &tegra_fb_ops; @@ -285,7 +293,9 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, } } +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0) drm->mode_config.fb_base = (resource_size_t)bo->iova; +#endif info->screen_base = (void __iomem *)bo->vaddr + offset; info->screen_size = size; info->fix.smem_start = (unsigned long)(bo->iova + offset); @@ -353,7 +363,11 @@ static int tegra_fbdev_init(struct tegra_fbdev *fbdev, static void tegra_fbdev_exit(struct tegra_fbdev *fbdev) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 2, 0) + drm_fb_helper_unregister_info(&fbdev->base); +#else drm_fb_helper_unregister_fbi(&fbdev->base); +#endif if (fbdev->fb) { struct tegra_bo *bo = tegra_fb_get_plane(fbdev->fb, 0); diff --git a/kernel/nvidia/drivers/gpu/host1x-nvhost/nvhost.c b/kernel/nvidia/drivers/gpu/host1x-nvhost/nvhost.c index 3220963071..5da2871589 100644 --- a/kernel/nvidia/drivers/gpu/host1x-nvhost/nvhost.c +++ b/kernel/nvidia/drivers/gpu/host1x-nvhost/nvhost.c @@ -269,7 +269,7 @@ bool nvhost_syncpt_is_valid_pt_ext(struct platform_device *pdev, u32 id) struct nvhost_device_data *pdata = platform_get_drvdata(pdev); struct host1x_syncpt *sp; - if (!pdata || pdata->host1x) + if (!pdata || !pdata->host1x) return -ENODEV; sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id); diff --git a/kernel/nvidia/drivers/gpu/host1x/hw/hw_host1x06_uclass.h b/kernel/nvidia/drivers/gpu/host1x/hw/hw_host1x06_uclass.h index 5f831438d1..50c32de452 100644 --- a/kernel/nvidia/drivers/gpu/host1x/hw/hw_host1x06_uclass.h +++ b/kernel/nvidia/drivers/gpu/host1x/hw/hw_host1x06_uclass.h @@ -53,7 +53,7 @@ static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v) host1x_uclass_incr_syncpt_cond_f(v) static inline u32 
host1x_uclass_incr_syncpt_indx_f(u32 v) { - return (v & 0xff) << 0; + return (v & 0x3ff) << 0; } #define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \ host1x_uclass_incr_syncpt_indx_f(v) diff --git a/kernel/nvidia/drivers/gpu/host1x/hw/hw_host1x07_uclass.h b/kernel/nvidia/drivers/gpu/host1x/hw/hw_host1x07_uclass.h index 8cd2ef087d..887b878f92 100644 --- a/kernel/nvidia/drivers/gpu/host1x/hw/hw_host1x07_uclass.h +++ b/kernel/nvidia/drivers/gpu/host1x/hw/hw_host1x07_uclass.h @@ -53,7 +53,7 @@ static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v) host1x_uclass_incr_syncpt_cond_f(v) static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v) { - return (v & 0xff) << 0; + return (v & 0x3ff) << 0; } #define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \ host1x_uclass_incr_syncpt_indx_f(v) diff --git a/kernel/nvidia/drivers/gpu/host1x/hw/hw_host1x08_uclass.h b/kernel/nvidia/drivers/gpu/host1x/hw/hw_host1x08_uclass.h index 724cccd71a..4fb1d090ed 100644 --- a/kernel/nvidia/drivers/gpu/host1x/hw/hw_host1x08_uclass.h +++ b/kernel/nvidia/drivers/gpu/host1x/hw/hw_host1x08_uclass.h @@ -53,7 +53,7 @@ static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v) host1x_uclass_incr_syncpt_cond_f(v) static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v) { - return (v & 0xff) << 0; + return (v & 0x3ff) << 0; } #define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \ host1x_uclass_incr_syncpt_indx_f(v) diff --git a/kernel/nvidia/drivers/hid/hid-atv-jarvis.c b/kernel/nvidia/drivers/hid/hid-atv-jarvis.c index 069e8eeffa..cb1ae77466 100644 --- a/kernel/nvidia/drivers/hid/hid-atv-jarvis.c +++ b/kernel/nvidia/drivers/hid/hid-atv-jarvis.c @@ -3,7 +3,7 @@ * providing keys and microphone audio functionality * * Copyright (C) 2014 Google, Inc. - * Copyright (c) 2015-2021 NVIDIA CORPORATION, All rights reserved. + * Copyright (c) 2015-2021,2023 NVIDIA CORPORATION, All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -222,6 +222,9 @@ static char *model[SNDRV_CARDS]; /* = {[0 ... (SNDRV_CARDS - 1)] = NULL}; */ static int pcm_devs[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1}; static int pcm_substreams[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1}; +static struct snd_pcm_substream *g_substream; +static struct mutex g_substream_lock; + module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for SHIELD Remote soundcard."); module_param_array(id, charp, NULL, 0444); @@ -1061,18 +1064,21 @@ static int snd_atvr_schedule_timer(struct snd_pcm_substream *substream) return ret; } -static void snd_atvr_timer_callback(unsigned long data) +static void snd_atvr_timer_callback(struct timer_list *timer) { uint readable; uint packets_read; bool need_silence = false; unsigned long flags; - struct snd_pcm_substream *substream = (struct snd_pcm_substream *)data; - struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream); #ifdef DEBUG_TIMER struct timeval t0, t1; int diff; #endif + struct snd_atvr *atvr_snd; + + mutex_lock(&g_substream_lock); + atvr_snd = snd_pcm_substream_chip(g_substream); + mutex_unlock(&g_substream_lock); /* timer_enabled will be false when stopping a stream. */ spin_lock_irqsave(&atvr_snd->timer_lock, flags); @@ -1106,13 +1112,16 @@ static void snd_atvr_timer_callback(unsigned long data) if (readable > 0) { atvr_snd->timer_state = TIMER_STATE_DURING_DECODE; /* Fall through into next state. 
*/ + goto during_decode; } else { need_silence = true; break; } case TIMER_STATE_DURING_DECODE: - packets_read = snd_atvr_decode_from_fifo(substream); +during_decode: mutex_lock(&g_substream_lock); + packets_read = snd_atvr_decode_from_fifo(g_substream); + mutex_unlock(&g_substream_lock); if (packets_read > 0) { /* Defer timeout */ @@ -1153,7 +1162,9 @@ static void snd_atvr_timer_callback(unsigned long data) spin_unlock_irqrestore(&atvr_snd->s_substream_lock, flags); /* This can cause snd_atvr_pcm_trigger() to be called, which * may try to stop the timer. */ - snd_atvr_handle_frame_advance(substream, frames_to_silence); + mutex_lock(&g_substream_lock); + snd_atvr_handle_frame_advance(g_substream, frames_to_silence); + mutex_unlock(&g_substream_lock); } else { #ifdef DEBUG_TIMER do_gettimeofday(&t1); @@ -1168,14 +1179,16 @@ static void snd_atvr_timer_callback(unsigned long data) pr_err("callback took %d ms\n", diff); #endif + mutex_lock(&g_substream_lock); spin_lock_irqsave(&atvr_snd->timer_lock, flags); if (need_silence) silence_counter += 1; else silence_counter = 0; if (atvr_snd->timer_enabled & ATVR_TIMER_ENABLED) - snd_atvr_schedule_timer(substream); + snd_atvr_schedule_timer(g_substream); spin_unlock_irqrestore(&atvr_snd->timer_lock, flags); + mutex_unlock(&g_substream_lock); } static int snd_atvr_timer_start(struct snd_pcm_substream *substream) @@ -1368,9 +1381,11 @@ static int snd_atvr_pcm_open(struct snd_pcm_substream *substream) #ifdef DEBUG_TIMER snd_atvr_log("%s, built %s %s\n", __func__, __DATE__, __TIME__); #endif + mutex_lock(&g_substream_lock); + g_substream = substream; + mutex_unlock(&g_substream_lock); /* Initialize the timer for the opened substream */ - setup_timer(&atvr_snd->decoding_timer, snd_atvr_timer_callback, - (unsigned long)substream); + timer_setup(&atvr_snd->decoding_timer, snd_atvr_timer_callback, 0); return ret; } @@ -1382,6 +1397,9 @@ static int snd_atvr_pcm_close(struct snd_pcm_substream *substream) snd_atvr_timer_stop(substream); 
del_timer_sync(&atvr_snd->decoding_timer); + mutex_lock(&g_substream_lock); + g_substream = NULL; + mutex_unlock(&g_substream_lock); #ifdef DEBUG_TIMER if (atvr_snd->timer_callback_count > 0) @@ -1558,7 +1576,7 @@ static int atvr_snd_initialize(struct hid_device *hdev, if (err) goto __nodev; /* dummy initialization */ - setup_timer(&atvr_snd->decoding_timer, + timer_setup(&atvr_snd->decoding_timer, snd_atvr_timer_callback, 0); for (i = 0; i < MAX_PCM_DEVICES && i < pcm_devs[dev]; i++) { @@ -2165,12 +2183,8 @@ static const struct hid_device_id atvr_devices[] = { USB_DEVICE_ID_NVIDIA_FRIDAY)}, {HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NVIDIA, USB_DEVICE_ID_NVIDIA_THUNDERSTRIKE)}, - {HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NVIDIA, - USB_DEVICE_ID_NVIDIA_STORMCASTER)}, {HID_USB_DEVICE(USB_VENDOR_ID_NVIDIA, USB_DEVICE_ID_NVIDIA_THUNDERSTRIKE)}, - {HID_USB_DEVICE(USB_VENDOR_ID_NVIDIA, - USB_DEVICE_ID_NVIDIA_STORMCASTER)}, { } }; MODULE_DEVICE_TABLE(hid, atvr_devices); @@ -2296,6 +2310,7 @@ static int atvr_init(void) int ret; mutex_init(&snd_cards_lock); + mutex_init(&g_substream_lock); ret = hid_register_driver(&atvr_driver); if (ret) { pr_err("%s: can't register SHIELD Remote driver\n", diff --git a/kernel/nvidia/drivers/hwmon/Kconfig b/kernel/nvidia/drivers/hwmon/Kconfig new file mode 100644 index 0000000000..c17b5480bc --- /dev/null +++ b/kernel/nvidia/drivers/hwmon/Kconfig @@ -0,0 +1,12 @@ +menu "HWMON devices" + +config SENSORS_F75308 + tristate "F75308 Hardware Monitor" + default n + depends on I2C + help + Say Y to enable F75308 hardware monitor. F75308 hardware monitor + is an I2C slave which can be used to learn the temperature, voltage, + and govern the connected fans by reading/writing the values from/to + the associated registers by an external I2C master. 
+endmenu diff --git a/kernel/nvidia/drivers/hwmon/Makefile b/kernel/nvidia/drivers/hwmon/Makefile new file mode 100644 index 0000000000..ce01f11a64 --- /dev/null +++ b/kernel/nvidia/drivers/hwmon/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_SENSORS_F75308) += f75308.o diff --git a/kernel/nvidia/drivers/hwmon/f75308.c b/kernel/nvidia/drivers/hwmon/f75308.c new file mode 100644 index 0000000000..538d146fd3 --- /dev/null +++ b/kernel/nvidia/drivers/hwmon/f75308.c @@ -0,0 +1,1303 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: GPL-2.0 + */ + +#include +#include +#include +#include +#include +#include + +#define DEVICE_NAME "f75308" +#define DEVICE_VID_ADDR 0xC0 +#define DEVICE_PID_ADDR 0xC2 + +#define DEVICE_VID 0x1934 + +#define DEVICE_PID_64PIN 0x1012 +#define DEVICE_PID_48PIN 0x1022 +#define DEVICE_PID_28PIN 0x1032 + +#define F75308_REG_BANK 0x00 + +/* BANK-0 */ +#define F75308_REG_VOLT(nr) (0x30 + (nr)) /* 0 ~ 14 */ +#define F75308_REG_TEMP_READ(nr) (0x40 + (nr * 2)) /* 0 ~ 6 */ +#define F75308_REG_FAN_READ(nr) (0x80 + (nr * 2)) /* 0 ~ 14 */ + +#define F75308_MAX_FAN_IN 14 +#define F75308_MAX_FAN_CTRL_CNT 11 +#define F75308_MAX_FAN_SEG_CNT 5 + +enum chip { + f75308a_28, + f75308b_48, + f75308c_64, +}; + +struct f75308_priv { + struct mutex locker; + struct i2c_client *client; + struct device *hwmon_dev; + enum chip chip_id; +}; + +static ssize_t f75308_show_temp(struct device *dev, + struct device_attribute *devattr, char *buf); + +static ssize_t f75308_show_in(struct device *dev, + struct device_attribute *devattr, char *buf); + +static ssize_t f75308_show_fan(struct device *dev, + struct device_attribute *devattr, char *buf); + +static ssize_t f75308_show_pwm(struct device *dev, + struct device_attribute *devattr, char *buf); + +static ssize_t f75308_set_pwm(struct device *dev, + struct device_attribute *devattr, const char *buf, + size_t count); + +static ssize_t 
f75308_show_fan_type(struct device *dev, + struct device_attribute *devattr, + char *buf); + +static ssize_t f75308_set_fan_type(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count); + +static ssize_t f75308_show_fan_mode(struct device *dev, + struct device_attribute *devattr, + char *buf); + +static ssize_t f75308_set_fan_mode(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count); + +static ssize_t f75308_show_fan_5_seg(struct device *dev, + struct device_attribute *devattr, + char *buf); + +static ssize_t f75308_set_fan_5_seg(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count); + +static ssize_t f75308_show_fan_map(struct device *dev, + struct device_attribute *devattr, char *buf); + +static ssize_t f75308_set_fan_map(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count); + +static SENSOR_DEVICE_ATTR(in0_input, 0444, f75308_show_in, NULL, 0); +static SENSOR_DEVICE_ATTR(in1_input, 0444, f75308_show_in, NULL, 1); +static SENSOR_DEVICE_ATTR(in2_input, 0444, f75308_show_in, NULL, 2); +static SENSOR_DEVICE_ATTR(in3_input, 0444, f75308_show_in, NULL, 3); +static SENSOR_DEVICE_ATTR(in4_input, 0444, f75308_show_in, NULL, 4); +static SENSOR_DEVICE_ATTR(in5_input, 0444, f75308_show_in, NULL, 5); +static SENSOR_DEVICE_ATTR(in6_input, 0444, f75308_show_in, NULL, 6); +static SENSOR_DEVICE_ATTR(in7_input, 0444, f75308_show_in, NULL, 7); +static SENSOR_DEVICE_ATTR(in8_input, 0444, f75308_show_in, NULL, 8); +static SENSOR_DEVICE_ATTR(in9_input, 0444, f75308_show_in, NULL, 9); +static SENSOR_DEVICE_ATTR(in10_input, 0444, f75308_show_in, NULL, 10); +static SENSOR_DEVICE_ATTR(in11_input, 0444, f75308_show_in, NULL, 11); +static SENSOR_DEVICE_ATTR(in12_input, 0444, f75308_show_in, NULL, 12); +static SENSOR_DEVICE_ATTR(in13_input, 0444, f75308_show_in, NULL, 13); +static SENSOR_DEVICE_ATTR(in14_input, 0444, f75308_show_in, NULL, 14); + +static 
SENSOR_DEVICE_ATTR(temp_local_input, 0444, f75308_show_temp, NULL, 0); +static SENSOR_DEVICE_ATTR(temp1_input, 0444, f75308_show_temp, NULL, 1); +static SENSOR_DEVICE_ATTR(temp2_input, 0444, f75308_show_temp, NULL, 2); +static SENSOR_DEVICE_ATTR(temp3_input, 0444, f75308_show_temp, NULL, 3); +static SENSOR_DEVICE_ATTR(temp4_input, 0444, f75308_show_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp5_input, 0444, f75308_show_temp, NULL, 5); +static SENSOR_DEVICE_ATTR(temp6_input, 0444, f75308_show_temp, NULL, 6); + +static SENSOR_DEVICE_ATTR(fan1_input, 0444, f75308_show_fan, NULL, 0); +static SENSOR_DEVICE_ATTR(fan2_input, 0444, f75308_show_fan, NULL, 1); +static SENSOR_DEVICE_ATTR(fan3_input, 0444, f75308_show_fan, NULL, 2); +static SENSOR_DEVICE_ATTR(fan4_input, 0444, f75308_show_fan, NULL, 3); +static SENSOR_DEVICE_ATTR(fan5_input, 0444, f75308_show_fan, NULL, 4); +static SENSOR_DEVICE_ATTR(fan6_input, 0444, f75308_show_fan, NULL, 5); +static SENSOR_DEVICE_ATTR(fan7_input, 0444, f75308_show_fan, NULL, 6); +static SENSOR_DEVICE_ATTR(fan8_input, 0444, f75308_show_fan, NULL, 7); +static SENSOR_DEVICE_ATTR(fan9_input, 0444, f75308_show_fan, NULL, 8); +static SENSOR_DEVICE_ATTR(fan10_input, 0444, f75308_show_fan, NULL, 9); +static SENSOR_DEVICE_ATTR(fan11_input, 0444, f75308_show_fan, NULL, 10); +static SENSOR_DEVICE_ATTR(fan12_input, 0444, f75308_show_fan, NULL, 11); +static SENSOR_DEVICE_ATTR(fan13_input, 0444, f75308_show_fan, NULL, 12); +static SENSOR_DEVICE_ATTR(fan14_input, 0444, f75308_show_fan, NULL, 13); + +static SENSOR_DEVICE_ATTR(pwm1, 0644, f75308_show_pwm, f75308_set_pwm, 0); +static SENSOR_DEVICE_ATTR(pwm2, 0644, f75308_show_pwm, f75308_set_pwm, 1); +static SENSOR_DEVICE_ATTR(pwm3, 0644, f75308_show_pwm, f75308_set_pwm, 2); +static SENSOR_DEVICE_ATTR(pwm4, 0644, f75308_show_pwm, f75308_set_pwm, 3); +static SENSOR_DEVICE_ATTR(pwm5, 0644, f75308_show_pwm, f75308_set_pwm, 4); +static SENSOR_DEVICE_ATTR(pwm6, 0644, f75308_show_pwm, f75308_set_pwm, 5); +static 
SENSOR_DEVICE_ATTR(pwm7, 0644, f75308_show_pwm, f75308_set_pwm, 6); +static SENSOR_DEVICE_ATTR(pwm8, 0644, f75308_show_pwm, f75308_set_pwm, 7); +static SENSOR_DEVICE_ATTR(pwm9, 0644, f75308_show_pwm, f75308_set_pwm, 8); +static SENSOR_DEVICE_ATTR(pwm10, 0644, f75308_show_pwm, f75308_set_pwm, 9); +static SENSOR_DEVICE_ATTR(pwm11, 0644, f75308_show_pwm, f75308_set_pwm, 10); + +static SENSOR_DEVICE_ATTR(fan1_type, 0644, f75308_show_fan_type, + f75308_set_fan_type, 0); +static SENSOR_DEVICE_ATTR(fan2_type, 0644, f75308_show_fan_type, + f75308_set_fan_type, 1); +static SENSOR_DEVICE_ATTR(fan3_type, 0644, f75308_show_fan_type, + f75308_set_fan_type, 2); +static SENSOR_DEVICE_ATTR(fan4_type, 0644, f75308_show_fan_type, + f75308_set_fan_type, 3); +static SENSOR_DEVICE_ATTR(fan5_type, 0644, f75308_show_fan_type, + f75308_set_fan_type, 4); +static SENSOR_DEVICE_ATTR(fan6_type, 0644, f75308_show_fan_type, + f75308_set_fan_type, 5); +static SENSOR_DEVICE_ATTR(fan7_type, 0644, f75308_show_fan_type, + f75308_set_fan_type, 6); +static SENSOR_DEVICE_ATTR(fan8_type, 0644, f75308_show_fan_type, + f75308_set_fan_type, 7); +static SENSOR_DEVICE_ATTR(fan9_type, 0644, f75308_show_fan_type, + f75308_set_fan_type, 8); +static SENSOR_DEVICE_ATTR(fan10_type, 0644, f75308_show_fan_type, + f75308_set_fan_type, 9); +static SENSOR_DEVICE_ATTR(fan11_type, 0644, f75308_show_fan_type, + f75308_set_fan_type, 10); + +static SENSOR_DEVICE_ATTR(fan1_mode, 0644, f75308_show_fan_mode, + f75308_set_fan_mode, 0); +static SENSOR_DEVICE_ATTR(fan2_mode, 0644, f75308_show_fan_mode, + f75308_set_fan_mode, 1); +static SENSOR_DEVICE_ATTR(fan3_mode, 0644, f75308_show_fan_mode, + f75308_set_fan_mode, 2); +static SENSOR_DEVICE_ATTR(fan4_mode, 0644, f75308_show_fan_mode, + f75308_set_fan_mode, 3); +static SENSOR_DEVICE_ATTR(fan5_mode, 0644, f75308_show_fan_mode, + f75308_set_fan_mode, 4); +static SENSOR_DEVICE_ATTR(fan6_mode, 0644, f75308_show_fan_mode, + f75308_set_fan_mode, 5); +static 
SENSOR_DEVICE_ATTR(fan7_mode, 0644, f75308_show_fan_mode, + f75308_set_fan_mode, 6); +static SENSOR_DEVICE_ATTR(fan8_mode, 0644, f75308_show_fan_mode, + f75308_set_fan_mode, 7); +static SENSOR_DEVICE_ATTR(fan9_mode, 0644, f75308_show_fan_mode, + f75308_set_fan_mode, 8); +static SENSOR_DEVICE_ATTR(fan10_mode, 0644, f75308_show_fan_mode, + f75308_set_fan_mode, 9); +static SENSOR_DEVICE_ATTR(fan11_mode, 0644, f75308_show_fan_mode, + f75308_set_fan_mode, 10); + +static SENSOR_DEVICE_ATTR(fan1_5_seg, 0644, f75308_show_fan_5_seg, + f75308_set_fan_5_seg, 0); +static SENSOR_DEVICE_ATTR(fan2_5_seg, 0644, f75308_show_fan_5_seg, + f75308_set_fan_5_seg, 1); +static SENSOR_DEVICE_ATTR(fan3_5_seg, 0644, f75308_show_fan_5_seg, + f75308_set_fan_5_seg, 2); +static SENSOR_DEVICE_ATTR(fan4_5_seg, 0644, f75308_show_fan_5_seg, + f75308_set_fan_5_seg, 3); +static SENSOR_DEVICE_ATTR(fan5_5_seg, 0644, f75308_show_fan_5_seg, + f75308_set_fan_5_seg, 4); +static SENSOR_DEVICE_ATTR(fan6_5_seg, 0644, f75308_show_fan_5_seg, + f75308_set_fan_5_seg, 5); +static SENSOR_DEVICE_ATTR(fan7_5_seg, 0644, f75308_show_fan_5_seg, + f75308_set_fan_5_seg, 6); +static SENSOR_DEVICE_ATTR(fan8_5_seg, 0644, f75308_show_fan_5_seg, + f75308_set_fan_5_seg, 7); +static SENSOR_DEVICE_ATTR(fan9_5_seg, 0644, f75308_show_fan_5_seg, + f75308_set_fan_5_seg, 8); +static SENSOR_DEVICE_ATTR(fan10_5_seg, 0644, f75308_show_fan_5_seg, + f75308_set_fan_5_seg, 9); +static SENSOR_DEVICE_ATTR(fan11_5_seg, 0644, f75308_show_fan_5_seg, + f75308_set_fan_5_seg, 10); + +static SENSOR_DEVICE_ATTR(fan1_map, 0644, f75308_show_fan_map, + f75308_set_fan_map, 0); +static SENSOR_DEVICE_ATTR(fan2_map, 0644, f75308_show_fan_map, + f75308_set_fan_map, 1); +static SENSOR_DEVICE_ATTR(fan3_map, 0644, f75308_show_fan_map, + f75308_set_fan_map, 2); +static SENSOR_DEVICE_ATTR(fan4_map, 0644, f75308_show_fan_map, + f75308_set_fan_map, 3); +static SENSOR_DEVICE_ATTR(fan5_map, 0644, f75308_show_fan_map, + f75308_set_fan_map, 4); +static 
SENSOR_DEVICE_ATTR(fan6_map, 0644, f75308_show_fan_map, + f75308_set_fan_map, 5); +static SENSOR_DEVICE_ATTR(fan7_map, 0644, f75308_show_fan_map, + f75308_set_fan_map, 6); +static SENSOR_DEVICE_ATTR(fan8_map, 0644, f75308_show_fan_map, + f75308_set_fan_map, 7); +static SENSOR_DEVICE_ATTR(fan9_map, 0644, f75308_show_fan_map, + f75308_set_fan_map, 8); +static SENSOR_DEVICE_ATTR(fan10_map, 0644, f75308_show_fan_map, + f75308_set_fan_map, 9); +static SENSOR_DEVICE_ATTR(fan11_map, 0644, f75308_show_fan_map, + f75308_set_fan_map, 10); + +static struct attribute *f75308a_28_attributes[] = { + &sensor_dev_attr_temp_local_input.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp3_input.dev_attr.attr, + + &sensor_dev_attr_fan1_input.dev_attr.attr, + &sensor_dev_attr_fan2_input.dev_attr.attr, + &sensor_dev_attr_fan3_input.dev_attr.attr, + &sensor_dev_attr_fan4_input.dev_attr.attr, + + &sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in1_input.dev_attr.attr, + &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_in3_input.dev_attr.attr, + &sensor_dev_attr_in4_input.dev_attr.attr, + &sensor_dev_attr_in5_input.dev_attr.attr, + + &sensor_dev_attr_pwm1.dev_attr.attr, + &sensor_dev_attr_pwm2.dev_attr.attr, + &sensor_dev_attr_pwm3.dev_attr.attr, + &sensor_dev_attr_pwm4.dev_attr.attr, + + &sensor_dev_attr_fan1_type.dev_attr.attr, + &sensor_dev_attr_fan2_type.dev_attr.attr, + &sensor_dev_attr_fan3_type.dev_attr.attr, + &sensor_dev_attr_fan4_type.dev_attr.attr, + + &sensor_dev_attr_fan1_mode.dev_attr.attr, + &sensor_dev_attr_fan2_mode.dev_attr.attr, + &sensor_dev_attr_fan3_mode.dev_attr.attr, + &sensor_dev_attr_fan4_mode.dev_attr.attr, + + &sensor_dev_attr_fan1_map.dev_attr.attr, + &sensor_dev_attr_fan2_map.dev_attr.attr, + &sensor_dev_attr_fan3_map.dev_attr.attr, + &sensor_dev_attr_fan4_map.dev_attr.attr, + + &sensor_dev_attr_fan1_5_seg.dev_attr.attr, + 
&sensor_dev_attr_fan2_5_seg.dev_attr.attr, + &sensor_dev_attr_fan3_5_seg.dev_attr.attr, + &sensor_dev_attr_fan4_5_seg.dev_attr.attr, + + NULL +}; + +static struct attribute *f75308b_48_attributes[] = { + &sensor_dev_attr_temp_local_input.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp3_input.dev_attr.attr, + &sensor_dev_attr_temp4_input.dev_attr.attr, + &sensor_dev_attr_temp5_input.dev_attr.attr, + + &sensor_dev_attr_fan1_input.dev_attr.attr, + &sensor_dev_attr_fan2_input.dev_attr.attr, + &sensor_dev_attr_fan3_input.dev_attr.attr, + &sensor_dev_attr_fan4_input.dev_attr.attr, + &sensor_dev_attr_fan5_input.dev_attr.attr, + &sensor_dev_attr_fan6_input.dev_attr.attr, + &sensor_dev_attr_fan7_input.dev_attr.attr, + &sensor_dev_attr_fan8_input.dev_attr.attr, + &sensor_dev_attr_fan9_input.dev_attr.attr, + + &sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in1_input.dev_attr.attr, + &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_in3_input.dev_attr.attr, + &sensor_dev_attr_in4_input.dev_attr.attr, + &sensor_dev_attr_in5_input.dev_attr.attr, + &sensor_dev_attr_in6_input.dev_attr.attr, + &sensor_dev_attr_in7_input.dev_attr.attr, + &sensor_dev_attr_in8_input.dev_attr.attr, + &sensor_dev_attr_in9_input.dev_attr.attr, + &sensor_dev_attr_in10_input.dev_attr.attr, + + &sensor_dev_attr_pwm1.dev_attr.attr, + &sensor_dev_attr_pwm2.dev_attr.attr, + &sensor_dev_attr_pwm3.dev_attr.attr, + &sensor_dev_attr_pwm4.dev_attr.attr, + &sensor_dev_attr_pwm5.dev_attr.attr, + &sensor_dev_attr_pwm6.dev_attr.attr, + &sensor_dev_attr_pwm7.dev_attr.attr, + + &sensor_dev_attr_fan1_type.dev_attr.attr, + &sensor_dev_attr_fan2_type.dev_attr.attr, + &sensor_dev_attr_fan3_type.dev_attr.attr, + &sensor_dev_attr_fan4_type.dev_attr.attr, + &sensor_dev_attr_fan5_type.dev_attr.attr, + &sensor_dev_attr_fan6_type.dev_attr.attr, + &sensor_dev_attr_fan7_type.dev_attr.attr, + + 
&sensor_dev_attr_fan1_mode.dev_attr.attr, + &sensor_dev_attr_fan2_mode.dev_attr.attr, + &sensor_dev_attr_fan3_mode.dev_attr.attr, + &sensor_dev_attr_fan4_mode.dev_attr.attr, + &sensor_dev_attr_fan5_mode.dev_attr.attr, + &sensor_dev_attr_fan6_mode.dev_attr.attr, + &sensor_dev_attr_fan7_mode.dev_attr.attr, + + &sensor_dev_attr_fan1_map.dev_attr.attr, + &sensor_dev_attr_fan2_map.dev_attr.attr, + &sensor_dev_attr_fan3_map.dev_attr.attr, + &sensor_dev_attr_fan4_map.dev_attr.attr, + &sensor_dev_attr_fan5_map.dev_attr.attr, + &sensor_dev_attr_fan6_map.dev_attr.attr, + &sensor_dev_attr_fan7_map.dev_attr.attr, + + &sensor_dev_attr_fan1_5_seg.dev_attr.attr, + &sensor_dev_attr_fan2_5_seg.dev_attr.attr, + &sensor_dev_attr_fan3_5_seg.dev_attr.attr, + &sensor_dev_attr_fan4_5_seg.dev_attr.attr, + &sensor_dev_attr_fan5_5_seg.dev_attr.attr, + &sensor_dev_attr_fan6_5_seg.dev_attr.attr, + &sensor_dev_attr_fan7_5_seg.dev_attr.attr, + + NULL +}; + +static struct attribute *f75308c_64_attributes[] = { + &sensor_dev_attr_temp_local_input.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp3_input.dev_attr.attr, + &sensor_dev_attr_temp4_input.dev_attr.attr, + &sensor_dev_attr_temp5_input.dev_attr.attr, + &sensor_dev_attr_temp6_input.dev_attr.attr, + + &sensor_dev_attr_fan1_input.dev_attr.attr, + &sensor_dev_attr_fan2_input.dev_attr.attr, + &sensor_dev_attr_fan3_input.dev_attr.attr, + &sensor_dev_attr_fan4_input.dev_attr.attr, + &sensor_dev_attr_fan5_input.dev_attr.attr, + &sensor_dev_attr_fan6_input.dev_attr.attr, + &sensor_dev_attr_fan7_input.dev_attr.attr, + &sensor_dev_attr_fan8_input.dev_attr.attr, + &sensor_dev_attr_fan9_input.dev_attr.attr, + &sensor_dev_attr_fan10_input.dev_attr.attr, + &sensor_dev_attr_fan11_input.dev_attr.attr, + &sensor_dev_attr_fan12_input.dev_attr.attr, + &sensor_dev_attr_fan13_input.dev_attr.attr, + &sensor_dev_attr_fan14_input.dev_attr.attr, + + 
&sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in1_input.dev_attr.attr, + &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_in3_input.dev_attr.attr, + &sensor_dev_attr_in4_input.dev_attr.attr, + &sensor_dev_attr_in5_input.dev_attr.attr, + &sensor_dev_attr_in6_input.dev_attr.attr, + &sensor_dev_attr_in7_input.dev_attr.attr, + &sensor_dev_attr_in8_input.dev_attr.attr, + &sensor_dev_attr_in9_input.dev_attr.attr, + &sensor_dev_attr_in10_input.dev_attr.attr, + &sensor_dev_attr_in11_input.dev_attr.attr, + &sensor_dev_attr_in12_input.dev_attr.attr, + &sensor_dev_attr_in13_input.dev_attr.attr, + &sensor_dev_attr_in14_input.dev_attr.attr, + + &sensor_dev_attr_pwm1.dev_attr.attr, + &sensor_dev_attr_pwm2.dev_attr.attr, + &sensor_dev_attr_pwm3.dev_attr.attr, + &sensor_dev_attr_pwm4.dev_attr.attr, + &sensor_dev_attr_pwm5.dev_attr.attr, + &sensor_dev_attr_pwm6.dev_attr.attr, + &sensor_dev_attr_pwm7.dev_attr.attr, + &sensor_dev_attr_pwm8.dev_attr.attr, + &sensor_dev_attr_pwm9.dev_attr.attr, + &sensor_dev_attr_pwm10.dev_attr.attr, + &sensor_dev_attr_pwm11.dev_attr.attr, + + &sensor_dev_attr_fan1_type.dev_attr.attr, + &sensor_dev_attr_fan2_type.dev_attr.attr, + &sensor_dev_attr_fan3_type.dev_attr.attr, + &sensor_dev_attr_fan4_type.dev_attr.attr, + &sensor_dev_attr_fan5_type.dev_attr.attr, + &sensor_dev_attr_fan6_type.dev_attr.attr, + &sensor_dev_attr_fan7_type.dev_attr.attr, + &sensor_dev_attr_fan8_type.dev_attr.attr, + &sensor_dev_attr_fan9_type.dev_attr.attr, + &sensor_dev_attr_fan10_type.dev_attr.attr, + &sensor_dev_attr_fan11_type.dev_attr.attr, + + &sensor_dev_attr_fan1_mode.dev_attr.attr, + &sensor_dev_attr_fan2_mode.dev_attr.attr, + &sensor_dev_attr_fan3_mode.dev_attr.attr, + &sensor_dev_attr_fan4_mode.dev_attr.attr, + &sensor_dev_attr_fan5_mode.dev_attr.attr, + &sensor_dev_attr_fan6_mode.dev_attr.attr, + &sensor_dev_attr_fan7_mode.dev_attr.attr, + &sensor_dev_attr_fan8_mode.dev_attr.attr, + &sensor_dev_attr_fan9_mode.dev_attr.attr, + 
&sensor_dev_attr_fan10_mode.dev_attr.attr, + &sensor_dev_attr_fan11_mode.dev_attr.attr, + + &sensor_dev_attr_fan1_map.dev_attr.attr, + &sensor_dev_attr_fan2_map.dev_attr.attr, + &sensor_dev_attr_fan3_map.dev_attr.attr, + &sensor_dev_attr_fan4_map.dev_attr.attr, + &sensor_dev_attr_fan5_map.dev_attr.attr, + &sensor_dev_attr_fan6_map.dev_attr.attr, + &sensor_dev_attr_fan7_map.dev_attr.attr, + &sensor_dev_attr_fan8_map.dev_attr.attr, + &sensor_dev_attr_fan9_map.dev_attr.attr, + &sensor_dev_attr_fan10_map.dev_attr.attr, + &sensor_dev_attr_fan11_map.dev_attr.attr, + + &sensor_dev_attr_fan1_5_seg.dev_attr.attr, + &sensor_dev_attr_fan2_5_seg.dev_attr.attr, + &sensor_dev_attr_fan3_5_seg.dev_attr.attr, + &sensor_dev_attr_fan4_5_seg.dev_attr.attr, + &sensor_dev_attr_fan5_5_seg.dev_attr.attr, + &sensor_dev_attr_fan6_5_seg.dev_attr.attr, + &sensor_dev_attr_fan7_5_seg.dev_attr.attr, + &sensor_dev_attr_fan8_5_seg.dev_attr.attr, + &sensor_dev_attr_fan9_5_seg.dev_attr.attr, + &sensor_dev_attr_fan10_5_seg.dev_attr.attr, + &sensor_dev_attr_fan11_5_seg.dev_attr.attr, + + NULL +}; + +static const struct attribute_group f75308a_28_group = { + .attrs = f75308a_28_attributes, +}; + +static const struct attribute_group f75308b_48_group = { + .attrs = f75308b_48_attributes, +}; + +static const struct attribute_group f75308c_64_group = { + .attrs = f75308c_64_attributes, +}; + +static const struct attribute_group *f75308a_28_groups[] = { + &f75308a_28_group, + NULL, +}; + +static const struct attribute_group *f75308b_48_groups[] = { + &f75308b_48_group, + NULL, +}; + +static const struct attribute_group *f75308c_64_groups[] = { + &f75308c_64_group, + NULL, +}; + +static const struct attribute_group **f75308_groups[] = { + f75308a_28_groups, + f75308b_48_groups, + f75308c_64_groups, +}; + +static int f75308_read8(struct i2c_client *client, u8 reg) +{ + return i2c_smbus_read_byte_data(client, reg); +} + +static int f75308_write8(struct i2c_client *client, u8 reg, u8 value) +{ + return 
i2c_smbus_write_byte_data(client, reg, value); +} + +static int f75308_write_mask8(struct i2c_client *client, u8 reg, u8 mask, + u8 value) +{ + int status; + + status = f75308_read8(client, reg); + if (status < 0) + return status; + + status = (status & ~mask) | (value & mask); + + return f75308_write8(client, reg, (u8)status); +} + +static inline u16 f75308_read16(struct i2c_client *client, u8 reg) +{ + return (i2c_smbus_read_byte_data(client, reg) << 8) | + i2c_smbus_read_byte_data(client, reg + 1); +} + +static ssize_t f75308_show_temp(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct f75308_priv *priv = dev_get_drvdata(dev); + struct i2c_client *client = priv->client; + int nr = to_sensor_dev_attr_2(devattr)->index; + int status, deci, frac, data; + + mutex_lock(&priv->locker); + + status = f75308_write8(client, F75308_REG_BANK, 0); + if (status) + goto err; + + status = f75308_read8(client, F75308_REG_TEMP_READ(nr) + 0); + if (status < 0) + goto err; + deci = status; + + status = f75308_read8(client, F75308_REG_TEMP_READ(nr) + 1); + if (status < 0) + goto err; + frac = status; + + data = deci * 1000 + (frac >> 5) * 125; + mutex_unlock(&priv->locker); + + dev_dbg(dev, "%s: nr:%d deci:%d frac:%d, data:%d\n", __func__, nr, deci, + frac, data); + return sprintf(buf, "%d\n", data); + +err: + mutex_unlock(&priv->locker); + return status; +} + +static ssize_t f75308_show_in(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct f75308_priv *priv = dev_get_drvdata(dev); + struct i2c_client *client = priv->client; + int nr = to_sensor_dev_attr_2(devattr)->index; + int status = 0, data; + + mutex_lock(&priv->locker); + + status = f75308_write8(client, F75308_REG_BANK, 0); + if (status) + goto err; + + data = f75308_read8(client, F75308_REG_VOLT(nr)); + if (data < 0) { + status = data; + goto err; + } + + data *= 8; + mutex_unlock(&priv->locker); + + return sprintf(buf, "%d\n", data); + +err: + 
mutex_unlock(&priv->locker); + return status; +} + +static ssize_t f75308_show_fan(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct f75308_priv *priv = dev_get_drvdata(dev); + struct i2c_client *client = priv->client; + int nr = to_sensor_dev_attr_2(devattr)->index; + int status, lsb, msb, data; + + mutex_lock(&priv->locker); + + status = f75308_write8(client, F75308_REG_BANK, 0); + if (status) + goto err; + + status = f75308_read8(client, F75308_REG_FAN_READ(nr) + 0); + if (status < 0) + goto err; + msb = status; + + status = f75308_read8(client, F75308_REG_FAN_READ(nr) + 1); + if (status < 0) + goto err; + lsb = status; + + dev_dbg(dev, "%s: nr: %d, msb: %x, lsb: %x\n", __func__, nr, msb, lsb); + + if (msb == 0x1f && lsb == 0xff) + data = 0; + else if (msb || lsb) + data = 1500000 / (msb * 256 + lsb); + else + data = 0; + + mutex_unlock(&priv->locker); + + return sprintf(buf, "%d\n", data); + +err: + mutex_unlock(&priv->locker); + return status; +} + +static ssize_t f75308_show_pwm(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct f75308_priv *priv = dev_get_drvdata(dev); + struct i2c_client *client = priv->client; + int nr = to_sensor_dev_attr_2(devattr)->index; + int status = 0, data; + + mutex_lock(&priv->locker); + + status = f75308_write8(client, F75308_REG_BANK, 0); + if (status) + goto done; + + data = f75308_read8(client, 0xa0 + nr); + if (data < 0) { + status = data; + goto done; + } + + status = sprintf(buf, "%d\n", data); + +done: + mutex_unlock(&priv->locker); + return status; +} + +static ssize_t f75308_set_pwm(struct device *dev, + struct device_attribute *devattr, const char *buf, + size_t count) +{ + struct f75308_priv *priv = dev_get_drvdata(dev); + struct i2c_client *client = priv->client; + int nr = to_sensor_dev_attr_2(devattr)->index; + int status = 0, fan_mode, pwm; + + status = kstrtoint(buf, 0, &pwm); + if (status) + return status; + + pwm = clamp_val(pwm, 0, 255); + + 
mutex_lock(&priv->locker); + + /* check whether fan mode is in manual duty mode */ + status = f75308_write8(client, F75308_REG_BANK, 4); + if (status) + goto done; + + fan_mode = f75308_read8(client, 0x74 + nr / 4); + if (fan_mode < 0) { + status = fan_mode; + goto done; + } + + fan_mode = fan_mode >> ((nr % 4) * 2); + fan_mode = fan_mode & 0x03; + if (fan_mode != 0x03) { + dev_err(dev, "%s: Only manual_duty mode supports PWM write!\n", + __func__); + status = -EOPNOTSUPP; + goto done; + } + + status = f75308_write8(client, F75308_REG_BANK, 5); + if (status) + goto done; + + status = f75308_write8(client, 0x11 + nr * 0x10, pwm); +done: + mutex_unlock(&priv->locker); + return status ? status : count; +} + +static ssize_t f75308_show_fan_type(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct f75308_priv *priv = dev_get_drvdata(dev); + struct i2c_client *client = priv->client; + int nr = to_sensor_dev_attr_2(devattr)->index; + int status = 0, data; + + mutex_lock(&priv->locker); + + status = f75308_write8(client, F75308_REG_BANK, 4); + if (status) + goto done; + + data = f75308_read8(client, 0x70 + nr / 4); + if (data < 0) { + status = data; + goto done; + } + + data = data >> ((nr % 4) * 2); + data = data & 0x03; + + switch (data) { + case 0: + status = sprintf(buf, "pwm\n"); + break; + case 1: + status = sprintf(buf, "linear\n"); + break; + case 2: + status = sprintf(buf, "pwm_opendrain\n"); + break; + + default: + case 3: + status = sprintf(buf, "%s: invalid data: nr: %d, data: %xh\n", + __func__, nr, data); + break; + } + +done: + mutex_unlock(&priv->locker); + return status; +} + +static int __f75308_set_fan_type(struct i2c_client *client, int nr, + const char *buf) +{ + int status, data, shift; + + status = f75308_write8(client, F75308_REG_BANK, 4); + if (status) + return status; + + if (!strncmp(buf, "pwm_opendrain", strlen("pwm_opendrain"))) { + data = 0x02; + } else if (!strncmp(buf, "linear", strlen("linear"))) { + data = 0x01; + 
} else if (!strncmp(buf, "pwm", strlen("pwm"))) { + data = 0x00; + } else { + dev_err(&client->dev, + "%s: support only pwm/linear/pwm_opendrain\n", + __func__); + return -EINVAL; + } + + shift = ((nr % 4) * 2); + + return f75308_write_mask8(client, 0x70 + nr / 4, 3 << shift, + data << shift); +} + +static ssize_t f75308_set_fan_type(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct f75308_priv *priv = dev_get_drvdata(dev); + struct i2c_client *client = priv->client; + int nr = to_sensor_dev_attr_2(devattr)->index; + int status = 0; + + mutex_lock(&priv->locker); + status = __f75308_set_fan_type(client, nr, buf); + mutex_unlock(&priv->locker); + + if (status) + return status; + + return count; +} + +static ssize_t f75308_show_fan_mode(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct f75308_priv *priv = dev_get_drvdata(dev); + struct i2c_client *client = priv->client; + int nr = to_sensor_dev_attr_2(devattr)->index; + int status = 0, data; + + mutex_lock(&priv->locker); + + status = f75308_write8(client, F75308_REG_BANK, 4); + if (status) + goto done; + + data = f75308_read8(client, 0x74 + nr / 4); + if (data < 0) { + status = data; + goto done; + } + + data = data >> ((nr % 4) * 2); + data = data & 0x03; + + switch (data) { + case 0: + status = sprintf(buf, "auto_rpm\n"); + break; + case 1: + status = sprintf(buf, "auto_duty\n"); + break; + case 2: + status = sprintf(buf, "manual_rpm\n"); + break; + case 3: + status = sprintf(buf, "manual_duty\n"); + break; + default: + status = sprintf(buf, "%s: invalid data: nr: %d, data: %xh\n", + __func__, nr, data); + break; + } + +done: + mutex_unlock(&priv->locker); + return status; +} + +static int __f75308_set_fan_mode(struct i2c_client *client, int nr, + const char *buf) +{ + int status, data, shift; + + status = f75308_write8(client, F75308_REG_BANK, 4); + if (status) + return status; + + if (!strncmp(buf, "manual_rpm", 
strlen("manual_rpm"))) { + data = 0x02; + } else if (!strncmp(buf, "manual_duty", strlen("manual_duty"))) { + data = 0x03; + } else if (!strncmp(buf, "auto_rpm", strlen("auto_rpm"))) { + data = 0x00; + } else if (!strncmp(buf, "auto_duty", strlen("auto_duty"))) { + data = 0x01; + } else { + dev_err(&client->dev, + "%s: support only manual_rpm/manual_duty/auto_rpm/auto_duty\n", + __func__); + + return -EINVAL; + } + + shift = ((nr % 4) * 2); + + return f75308_write_mask8(client, 0x74 + nr / 4, 3 << shift, + data << shift); +} + +static ssize_t f75308_set_fan_mode(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct f75308_priv *priv = dev_get_drvdata(dev); + struct i2c_client *client = priv->client; + int nr = to_sensor_dev_attr_2(devattr)->index; + int status; + + mutex_lock(&priv->locker); + status = __f75308_set_fan_mode(client, nr, buf); + mutex_unlock(&priv->locker); + + if (status) + return status; + + return count; +} + +static ssize_t f75308_show_fan_5_seg(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct f75308_priv *priv = dev_get_drvdata(dev); + struct i2c_client *client = priv->client; + int nr = to_sensor_dev_attr_2(devattr)->index; + int status = 0, data[5], i, tmp; + + mutex_lock(&priv->locker); + + status = f75308_write8(client, F75308_REG_BANK, 5); + if (status) + goto done; + + for (i = 0; i < 5; ++i) { + data[i] = f75308_read8(client, 0x18 + nr * 0x10 + i); + if (data[i] < 0) { + status = data[i]; + goto done; + } + + tmp = data[i] * 100 / 255; + dev_dbg(dev, "%s: reg: %x, data: %x, %d%%\n", __func__, + 0x18 + nr * 0x10 + i, data[i], tmp); + data[i] = tmp; + } + + status = sprintf(buf, "%d%% %d%% %d%% %d%% %d%%\n", data[0], data[1], + data[2], data[3], data[4]); +done: + mutex_unlock(&priv->locker); + return status; +} + +static int __f75308_set_fan_5_seg(struct i2c_client *client, int nr, + int data[5]) +{ + int status, i, tmp; + + for (i = 0; i < 5; ++i) { + if 
(data[i] > 100 || data[i] < 0) + return -EINVAL; + } + + status = f75308_write8(client, F75308_REG_BANK, 5); + if (status) + return status; + + for (i = 0; i < 5; ++i) { + tmp = 255 * data[i] / 100; + + status = f75308_write8(client, 0x18 + nr * 0x10 + i, (u8)tmp); + if (status) + return status; + + dev_dbg(&client->dev, "%s: reg: %x, data: %x\n", __func__, + 0x18 + nr * 0x10 + i, tmp); + } + + return 0; +} + +static ssize_t f75308_set_fan_5_seg(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct f75308_priv *priv = dev_get_drvdata(dev); + struct i2c_client *client = priv->client; + int nr = to_sensor_dev_attr_2(devattr)->index; + int status, data[5], i; + u8 *p; + + mutex_lock(&priv->locker); + + for (i = 0; i < 5; ++i) { + p = strsep((char **)&buf, " "); + if (!p) { + count = -EINVAL; + goto done; + } + + status = kstrtoint(p, 0, &data[i]); + if (status) { + count = status; + goto done; + } + + if (data[i] > 100 || data[i] < 0) { + count = -EINVAL; + goto done; + } + } + + status = __f75308_set_fan_5_seg(client, nr, data); + if (status) + count = status; + +done: + mutex_unlock(&priv->locker); + return count; +} + +static ssize_t f75308_show_fan_map(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct f75308_priv *priv = dev_get_drvdata(dev); + struct i2c_client *client = priv->client; + int nr = to_sensor_dev_attr_2(devattr)->index; + int status = 0, data; + + mutex_lock(&priv->locker); + + status = f75308_write8(client, F75308_REG_BANK, 4); + if (status) + goto done; + + data = f75308_read8(client, 0x50 + nr); + if (data < 0) { + status = data; + goto done; + } + + dev_dbg(dev, "%s: idx: %d, data: %x\n", __func__, nr, data); + status = sprintf(buf, "%d\n", data); +done: + mutex_unlock(&priv->locker); + return status; +} + +static ssize_t f75308_set_fan_map(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct f75308_priv *priv = 
dev_get_drvdata(dev); + struct i2c_client *client = priv->client; + int nr = to_sensor_dev_attr_2(devattr)->index; + int status = 0, data; + + status = kstrtoint(buf, 0, &data); + if (status) { + count = status; + goto done; + } + + mutex_lock(&priv->locker); + + status = f75308_write8(client, F75308_REG_BANK, 4); + if (status) + goto done; + + status = f75308_write8(client, 0x50 + nr, data); + if (status) + goto done; + + status = count; + dev_dbg(dev, "%s: idx: %d, data: %x\n", __func__, nr, data); +done: + mutex_unlock(&priv->locker); + + return status; +} + +static int f75308_get_devid(struct i2c_client *client, enum chip *chipid) +{ + u16 vendid, pid; + int status; + + status = f75308_write8(client, F75308_REG_BANK, 0); + if (status) + return status; + + vendid = f75308_read16(client, DEVICE_VID_ADDR); + pid = f75308_read16(client, DEVICE_PID_ADDR); + if (vendid != DEVICE_VID) + return -ENODEV; + + if (pid == DEVICE_PID_64PIN) + *chipid = f75308c_64; + else if (pid == DEVICE_PID_48PIN) + *chipid = f75308b_48; + else if (pid == DEVICE_PID_28PIN) + *chipid = f75308a_28; + else + return -ENODEV; + + return 0; +} + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int f75308_detect(struct i2c_client *client, struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = client->adapter; + enum chip chipid; + const char *name; + int status = 0; + + status = f75308_get_devid(client, &chipid); + if (status) + return status; + + if (chipid == f75308a_28) + name = "F75308AR"; + else if (chipid == f75308b_48) + name = "F75308BD"; + else if (chipid == f75308c_64) + name = "F75308CU"; + else + return -ENODEV; + + dev_info(&adapter->dev, "%s: found %s with addr %x on %s\n", __func__, + name, info->addr, adapter->name); + strlcpy(info->type, name, I2C_NAME_SIZE); + + return 0; +} + +static int f75308_init(struct i2c_client *client) +{ + struct f75308_priv *priv = dev_get_drvdata(&client->dev); + int status, tmp; + + // check f75308a_28 mapping is 
default + if (priv->chip_id == f75308a_28) { + status = f75308_write8(client, F75308_REG_BANK, 4); + if (status) + goto err; + + tmp = f75308_read8(client, 0x53); + if (tmp < 0) + return tmp; + + if (tmp == 0x04) { + // re-mapping FAN4 to T0 + status = f75308_write8(client, 0x53, 0); + if (status) + goto err; + } + + status = f75308_write8(client, F75308_REG_BANK, 0); + if (status) + goto err; + } + + return 0; + +err: + return status; +} + +static int f75308_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device_node *child, *np = client->dev.of_node; + struct property *prop; + struct f75308_priv *priv; + int status, seg5[5]; + const char *val_str; + const __be32 *p; + int val, reg_idx, i; + + priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + mutex_init(&priv->locker); + priv->client = client; + dev_set_drvdata(&client->dev, priv); + + if (np) + dev_dbg(&client->dev, "%s: np name: %s, full name: %s\n", + __func__, np->name, np->full_name); + + status = f75308_get_devid(client, &priv->chip_id); + if (status) { + dev_err(&client->dev, "%s: f75308_get_devid error: %d\n", + __func__, status); + goto destroy_lock; + } + + status = f75308_init(client); + if (status) { + dev_err(&client->dev, "%s: f75308_init error: %d\n", __func__, + status); + goto destroy_lock; + } + + for_each_child_of_node(np, child) { + dev_dbg(&client->dev, "%s: child name: %s, full name: %s\n", + __func__, child->name, child->full_name); + + if (of_property_read_u32(child, "reg", ®_idx)) { + dev_err(&client->dev, "missing reg property of %pOFn\n", + child); + status = -EINVAL; + goto put_child; + + } else { + dev_dbg(&client->dev, "%s: reg_idx: %d\n", __func__, + reg_idx); + } + + if (of_property_read_string(child, "type", &val_str)) { + dev_err(&client->dev, "read type failed or no type\n"); + } else { + dev_dbg(&client->dev, "%s: type: %s\n", __func__, + val_str); + + status = + __f75308_set_fan_type(client, reg_idx, 
val_str); + if (status) + goto put_child; + } + + if (of_property_read_string(child, "duty", &val_str)) { + dev_err(&client->dev, "read duty failed or no duty\n"); + } else { + dev_dbg(&client->dev, "%s: duty: %s\n", __func__, + val_str); + + status = + __f75308_set_fan_mode(client, reg_idx, val_str); + if (status) + goto put_child; + } + + i = 0; + of_property_for_each_u32(child, "5seg", prop, p, val) { + dev_dbg(&client->dev, "%s: 5seg: i: %d, val: %d\n", + __func__, i, val); + seg5[i] = val; + i++; + } + + if (i == 5) { + status = __f75308_set_fan_5_seg(client, reg_idx, seg5); + if (status) + goto put_child; + } + } + + priv->hwmon_dev = devm_hwmon_device_register_with_groups( + &client->dev, DEVICE_NAME, priv, f75308_groups[priv->chip_id]); + if (IS_ERR(priv->hwmon_dev)) { + status = PTR_ERR(priv->hwmon_dev); + goto put_child; + } + + dev_info(&client->dev, "Finished f75308 probing\n"); + return 0; + +put_child: + of_node_put(child); +destroy_lock: + mutex_destroy(&priv->locker); + return status; +} + +static int f75308_remove(struct i2c_client *client) +{ + struct f75308_priv *priv = dev_get_drvdata(&client->dev); + + mutex_destroy(&priv->locker); + return 0; +} + +static const unsigned short f75308_addr[] = { + 0x58 >> 1, 0x5A >> 1, 0x5C >> 1, 0x5E >> 1, + 0x98 >> 1, 0x9A >> 1, 0x9C >> 1, 0x9E >> 1, + I2C_CLIENT_END, +}; + +static const struct i2c_device_id f75308_id[] = { { "F75308CU", f75308c_64 }, + { "F75308BD", f75308b_48 }, + { "F75308AR", f75308a_28 }, + {} }; + +MODULE_DEVICE_TABLE(i2c, f75308_id); + +#ifdef CONFIG_OF +static const struct of_device_id f75308_match_table[] = { + { .compatible = "fintek,f75308" }, + {}, +}; +MODULE_DEVICE_TABLE(of, f75308_match_table); +#else +#define f75308_match_table NULL +#endif + +static struct i2c_driver f75308_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = DEVICE_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(f75308_match_table), + }, + + .detect = f75308_detect, + .probe = 
f75308_probe, + .remove = f75308_remove, + .address_list = f75308_addr, + .id_table = f75308_id, +}; + +module_i2c_driver(f75308_driver); + +MODULE_AUTHOR("Ji-Ze Hong (Peter Hong) "); +MODULE_AUTHOR("Yi-Wei Wang "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("F75308 hardware monitoring driver"); diff --git a/kernel/nvidia/drivers/media/platform/tegra/camera/nvcsi/csi5_fops.c b/kernel/nvidia/drivers/media/platform/tegra/camera/nvcsi/csi5_fops.c index ccefa4c625..b104ab3eb9 100644 --- a/kernel/nvidia/drivers/media/platform/tegra/camera/nvcsi/csi5_fops.c +++ b/kernel/nvidia/drivers/media/platform/tegra/camera/nvcsi/csi5_fops.c @@ -219,7 +219,8 @@ static int csi5_stream_set_config(struct tegra_csi_channel *chan, u32 stream_id, struct CAPTURE_CONTROL_MSG msg; struct nvcsi_brick_config brick_config; struct nvcsi_cil_config cil_config; - bool is_cphy = (csi_lanes == 3); + u32 phy_mode = read_phy_mode_from_dt(chan); + bool is_cphy = (phy_mode == CSI_PHY_MODE_CPHY); dev_dbg(csi->dev, "%s: stream_id=%u, csi_port=%u\n", __func__, stream_id, csi_port); diff --git a/kernel/nvidia/drivers/media/platform/tegra/camera/vi/channel.c b/kernel/nvidia/drivers/media/platform/tegra/camera/vi/channel.c index 2d20688603..df03a82762 100644 --- a/kernel/nvidia/drivers/media/platform/tegra/camera/vi/channel.c +++ b/kernel/nvidia/drivers/media/platform/tegra/camera/vi/channel.c @@ -1,13 +1,18 @@ -/* - * NVIDIA Tegra Video Input Device +/* NVIDIA Tegra Video Input Device + * Author: Bryan Wu + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. * - * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
* - * Author: Bryan Wu + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . */ #include @@ -1353,6 +1358,11 @@ int tegra_channel_s_ctrl(struct v4l2_ctrl *ctrl) struct tegra_channel, ctrl_handler); int err = 0; + /* Check device is busy or not, While setting bypass mode*/ + if (vb2_is_busy(&chan->queue) && (TEGRA_CAMERA_CID_VI_BYPASS_MODE == ctrl->id)) { + return -EBUSY; + } + switch (ctrl->id) { case TEGRA_CAMERA_CID_GAIN_TPG: { diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/Makefile b/kernel/nvidia/drivers/misc/tegra-profiler/Makefile index 1f4861ca7f..4416fb278d 100644 --- a/kernel/nvidia/drivers/misc/tegra-profiler/Makefile +++ b/kernel/nvidia/drivers/misc/tegra-profiler/Makefile @@ -10,7 +10,7 @@ # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # -# Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved. 
# KBUILD_CFLAGS += -I$(srctree.nvidia)/drivers/platform/tegra @@ -25,7 +25,6 @@ tegra-profiler-y := \ comm.o \ mmap.o \ backtrace.o \ - debug.o \ ma.o \ power_clk.o \ auth.o \ diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/armv7_pmu.c b/kernel/nvidia/drivers/misc/tegra-profiler/armv7_pmu.c index 0c129e10df..e975a318fd 100644 --- a/kernel/nvidia/drivers/misc/tegra-profiler/armv7_pmu.c +++ b/kernel/nvidia/drivers/misc/tegra-profiler/armv7_pmu.c @@ -1,7 +1,7 @@ /* * drivers/misc/tegra-profiler/armv7_pmu.c * - * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -28,7 +28,6 @@ #include "armv7_pmu.h" #include "armv7_events.h" #include "quadd.h" -#include "debug.h" static DEFINE_PER_CPU(struct quadd_pmu_ctx, pmu_ctx); @@ -514,8 +513,6 @@ static void pmu_start(void) reset_all_counters(); enable_all_counters(); } - - qm_debug_start_source(QUADD_EVENT_SOURCE_PMU); } static void pmu_stop(void) @@ -532,8 +529,6 @@ static void pmu_stop(void) write_counter(idx, 0); } } - - qm_debug_stop_source(QUADD_EVENT_SOURCE_PMU); } static int @@ -591,9 +586,6 @@ pmu_read(struct quadd_event_data *events, int max_events) *prevp = val; - qm_debug_read_counter(&events->event, events->prev_val, - events->val); - if (++i >= max_events) break; diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/armv8_pmu.c b/kernel/nvidia/drivers/misc/tegra-profiler/armv8_pmu.c index 3e60ff82be..2782334e65 100644 --- a/kernel/nvidia/drivers/misc/tegra-profiler/armv8_pmu.c +++ b/kernel/nvidia/drivers/misc/tegra-profiler/armv8_pmu.c @@ -1,7 +1,7 @@ /* * drivers/misc/tegra-profiler/armv8_pmu.c * - * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -33,7 +33,6 @@ #include "armv8_pmu.h" #include "armv8_events.h" #include "quadd.h" -#include "debug.h" struct quadd_pmu_info { DECLARE_BITMAP(used_cntrs, QUADD_MAX_PMU_COUNTERS); @@ -591,8 +590,6 @@ static void pmu_start(void) reset_all_counters(); enable_all_counters(); } - - qm_debug_start_source(QUADD_EVENT_SOURCE_PMU); } static void pmu_stop(void) @@ -609,8 +606,6 @@ static void pmu_stop(void) write_counter(idx, 0); } } - - qm_debug_stop_source(QUADD_EVENT_SOURCE_PMU); } static int @@ -668,9 +663,6 @@ pmu_read(struct quadd_event_data *events, int max_events) *prevp = val; - qm_debug_read_counter(&events->event, events->prev_val, - events->val); - if (++i >= max_events) break; diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/auth.c b/kernel/nvidia/drivers/misc/tegra-profiler/auth.c index 9d59cbd66c..638bd15f06 100644 --- a/kernel/nvidia/drivers/misc/tegra-profiler/auth.c +++ b/kernel/nvidia/drivers/misc/tegra-profiler/auth.c @@ -1,7 +1,7 @@ /* * drivers/misc/tegra-profiler/auth.c * - * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -24,7 +24,6 @@ #include "auth.h" #include "quadd.h" -#include "debug.h" #define QUADD_SECURITY_MAGIC_REQUEST 0x11112222 #define QUADD_SECURITY_MAGIC_RESPONSE 0x33334444 diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/carmel_pmu.c b/kernel/nvidia/drivers/misc/tegra-profiler/carmel_pmu.c index be77470ae2..b0d3cae395 100644 --- a/kernel/nvidia/drivers/misc/tegra-profiler/carmel_pmu.c +++ b/kernel/nvidia/drivers/misc/tegra-profiler/carmel_pmu.c @@ -1,7 +1,7 @@ /* * drivers/misc/tegra-profiler/carmel_pmu.c * - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -33,7 +33,6 @@ #include "carmel_pmu.h" #include "quadd.h" -#include "debug.h" /* * Some parts of this code are taken from platform/tegra/tegra19_perf_uncore.c diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/debug.c b/kernel/nvidia/drivers/misc/tegra-profiler/debug.c deleted file mode 100644 index da4fa514d3..0000000000 --- a/kernel/nvidia/drivers/misc/tegra-profiler/debug.c +++ /dev/null @@ -1,175 +0,0 @@ -/* - * drivers/misc/tegra-profiler/debug.c - * - * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - */ - -#include -#include - -#include - -#include "debug.h" -#include "hrt.h" -#include "tegra.h" -#include "comm.h" - -#ifdef QM_DEBUG_SAMPLES_ENABLE - -static inline void -init_sample(struct quadd_record_data *record, struct pt_regs *regs) -{ - unsigned int flags; - struct quadd_debug_data *s = &record->debug; - - record->magic = QUADD_RECORD_MAGIC; - record->record_type = QUADD_RECORD_TYPE_DEBUG; - - if (!regs) - regs = get_irq_regs(); - - if (!regs) - s->user_mode = 0; - else - s->user_mode = user_mode(regs) ? 1 : 0; - - s->cpu = quadd_get_processor_id(regs, &flags); - - s->lp_mode = flags & QUADD_CPUMODE_TEGRA_POWER_CLUSTER_LP ? 1 : 0; - s->thumb_mode = flags & QUADD_CPUMODE_THUMB ? 1 : 0; - - s->reserved = 0; - - s->pid = 0; - s->time = quadd_get_time(); - - s->extra_value[0] = 0; - s->extra_value[1] = 0; - - s->extra_length = 0; -} - -void qm_debug_handler_sample(struct pt_regs *regs) -{ - struct quadd_record_data record; - struct quadd_debug_data *s = &record.debug; - - init_sample(&record, regs); - - s->type = QM_DEBUG_SAMPLE_TYPE_TIMER_HANDLE; - - quadd_put_sample_this_cpu(&record, NULL, 0); -} - -void qm_debug_timer_forward(struct pt_regs *regs, u64 period) -{ - struct quadd_record_data record; - struct quadd_debug_data *s = &record.debug; - - init_sample(&record, regs); - - s->type = QM_DEBUG_SAMPLE_TYPE_TIMER_FORWARD; - - quadd_put_sample_this_cpu(&record, NULL, 0); -} - -void qm_debug_timer_start(struct pt_regs *regs, u64 period) -{ - struct quadd_record_data record; - struct quadd_debug_data *s = &record.debug; - - init_sample(&record, regs); - - s->type = QM_DEBUG_SAMPLE_TYPE_TIMER_START; - - quadd_put_sample_this_cpu(&record, NULL, 0); -} - -void qm_debug_timer_cancel(void) -{ - struct quadd_record_data record; - struct quadd_debug_data *s = &record.debug; - - init_sample(&record, NULL); - - s->type = QM_DEBUG_SAMPLE_TYPE_TIMER_CANCEL; - - quadd_put_sample_this_cpu(&record, NULL, 0); -} - -void -qm_debug_task_sched_in(pid_t prev_pid, pid_t 
current_pid, int prev_nr_active) -{ - struct quadd_iovec vec; - struct quadd_record_data record; - struct quadd_debug_data *s = &record.debug; - - init_sample(&record, NULL); - - s->type = QM_DEBUG_SAMPLE_TYPE_SCHED_IN; - - s->extra_value[0] = prev_pid; - s->extra_value[1] = current_pid; - - vec.base = &prev_nr_active; - vec.len = s->extra_length = sizeof(prev_nr_active); - - quadd_put_sample_this_cpu(&record, &vec, 1); -} - -void qm_debug_read_counter(struct quadd_event *event, u32 prev_val, u32 val) -{ - struct quadd_iovec vec; - struct quadd_record_data record; - struct quadd_debug_data *s = &record.debug; - - init_sample(&record, NULL); - - s->type = QM_DEBUG_SAMPLE_TYPE_READ_COUNTER; - - s->extra_value[0] = event->id; - s->extra_value[1] = prev_val; - - vec.base = &val; - vec.len = s->extra_length = sizeof(val); - - quadd_put_sample_this_cpu(&record, &vec, 1); -} - -void qm_debug_start_source(int source_type) -{ - struct quadd_record_data record; - struct quadd_debug_data *s = &record.debug; - - init_sample(&record, NULL); - - s->type = QM_DEBUG_SAMPLE_TYPE_SOURCE_START; - s->extra_value[0] = source_type; - - quadd_put_sample_this_cpu(&record, NULL, 0); -} - -void qm_debug_stop_source(int source_type) -{ - struct quadd_record_data record; - struct quadd_debug_data *s = &record.debug; - - init_sample(&record, NULL); - - s->type = QM_DEBUG_SAMPLE_TYPE_SOURCE_STOP; - s->extra_value[0] = source_type; - - quadd_put_sample_this_cpu(&record, NULL, 0); -} - -#endif /* QM_DEBUG_SAMPLES_ENABLE */ diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/debug.h b/kernel/nvidia/drivers/misc/tegra-profiler/debug.h deleted file mode 100644 index ae50687c79..0000000000 --- a/kernel/nvidia/drivers/misc/tegra-profiler/debug.h +++ /dev/null @@ -1,93 +0,0 @@ -/* - * drivers/misc/tegra-profiler/debug.h - * - * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - */ - -#ifndef __QUADD_DEBUG_H -#define __QUADD_DEBUG_H - -#include - -/* #define QM_DEBUG_SAMPLES_ENABLE 1 */ - -#ifdef QM_DEBUG_SAMPLES_ENABLE -void qm_debug_handler_sample(struct pt_regs *regs); -void qm_debug_timer_forward(struct pt_regs *regs, u64 period); -void qm_debug_timer_start(struct pt_regs *regs, u64 period); -void qm_debug_timer_cancel(void); -void qm_debug_task_sched_in(pid_t prev_pid, pid_t current_pid, - int prev_nr_active); -void qm_debug_read_counter(struct quadd_event *event, u32 prev_val, u32 val); -void qm_debug_start_source(int source_type); -void qm_debug_stop_source(int source_type); -#else -static inline void qm_debug_handler_sample(struct pt_regs *regs) -{ -} -static inline void qm_debug_timer_forward(struct pt_regs *regs, u64 period) -{ -} -static inline void qm_debug_timer_start(struct pt_regs *regs, u64 period) -{ -} -static inline void qm_debug_timer_cancel(void) -{ -} -static inline void -qm_debug_task_sched_in(pid_t prev_pid, pid_t current_pid, int prev_nr_active) -{ -} -static inline void -qm_debug_read_counter(struct quadd_event *event, u32 prev_val, u32 val) -{ -} -static inline void qm_debug_start_source(int source_type) -{ -} -static inline void qm_debug_stop_source(int source_type) -{ -} -#endif - -void quadd_test_delay(void); - -#define QM_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) -static inline const char * -quadd_get_hw_event_str(unsigned int event) -{ - static const char * const str[] = { - [QUADD_EVENT_HW_CPU_CYCLES] = "cpu-cycles", - - [QUADD_EVENT_HW_INSTRUCTIONS] 
= "instructions", - [QUADD_EVENT_HW_BRANCH_INSTRUCTIONS] = "branch_instruction", - [QUADD_EVENT_HW_BRANCH_MISSES] = "branch_misses", - [QUADD_EVENT_HW_BUS_CYCLES] = "bus-cycles", - - [QUADD_EVENT_HW_L1_DCACHE_READ_MISSES] = "l1_d_read", - [QUADD_EVENT_HW_L1_DCACHE_WRITE_MISSES] = "l1_d_write", - [QUADD_EVENT_HW_L1_ICACHE_MISSES] = "l1_i", - - [QUADD_EVENT_HW_L2_DCACHE_READ_MISSES] = "l2_d_read", - [QUADD_EVENT_HW_L2_DCACHE_WRITE_MISSES] = "l2_d_write", - [QUADD_EVENT_HW_L2_ICACHE_MISSES] = "l2_i", - }; - - if (event >= QM_ARRAY_SIZE(str)) - return "invalid event"; - - spec_bar(); - return str[event]; -} - -#endif /* __QUADD_DEBUG_H */ diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/hrt.c b/kernel/nvidia/drivers/misc/tegra-profiler/hrt.c index 267ba328be..2aab229e01 100644 --- a/kernel/nvidia/drivers/misc/tegra-profiler/hrt.c +++ b/kernel/nvidia/drivers/misc/tegra-profiler/hrt.c @@ -1,7 +1,7 @@ /* * drivers/misc/tegra-profiler/hrt.c * - * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -44,7 +44,6 @@ #include "ma.h" #include "power_clk.h" #include "tegra.h" -#include "debug.h" static struct quadd_hrt_ctx hrt = { .active = ATOMIC_INIT(0), @@ -252,10 +251,6 @@ static void put_header(int cpuid, bool is_uncore) if (param->use_freq) hdr->flags |= QUADD_HDR_FLAG_USE_FREQ; -#ifdef QM_DEBUG_SAMPLES_ENABLE - hdr->flags |= QUADD_HDR_FLAG_DEBUG_SAMPLES; -#endif - hdr->freq = param->freq; hdr->ma_freq = param->ma_freq; hdr->power_rate_freq = param->power_rate_freq; @@ -606,7 +601,7 @@ read_all_sources(struct pt_regs *regs, struct task_struct *task, u64 ts) u32 value = (u32)cpu_ctx->events[i].delta; if (value > 0) { - s->events_flags |= 1 << i; + s->events_flags |= 1U << i; events_extra[nr_positive_events++] = value; } } @@ -660,13 +655,10 @@ static enum hrtimer_restart hrtimer_handler(struct hrtimer *hrtimer) if (!atomic_read(&hrt.active)) return HRTIMER_NORESTART; - qm_debug_handler_sample(regs); - if (regs) read_all_sources(regs, current, quadd_get_time()); hrtimer_forward_now(hrtimer, ns_to_ktime(hrt.sample_period)); - qm_debug_timer_forward(regs, hrt.sample_period); return HRTIMER_RESTART; } @@ -683,14 +675,11 @@ static void start_hrtimer(struct quadd_cpu_context *cpu_ctx) hrtimer_start(&cpu_ctx->hrtimer, ns_to_ktime(period), HRTIMER_MODE_REL_PINNED); #endif - - qm_debug_timer_start(NULL, period); } static void cancel_hrtimer(struct quadd_cpu_context *cpu_ctx) { hrtimer_cancel(&cpu_ctx->hrtimer); - qm_debug_timer_cancel(); } static void init_hrtimer(struct quadd_cpu_context *cpu_ctx) diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/ma.c b/kernel/nvidia/drivers/misc/tegra-profiler/ma.c index 91decf5b41..36cf14ece8 100644 --- a/kernel/nvidia/drivers/misc/tegra-profiler/ma.c +++ b/kernel/nvidia/drivers/misc/tegra-profiler/ma.c @@ -1,7 +1,7 @@ /* * drivers/misc/tegra-profiler/ma.c * - * Copyright (c) 
2015-2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -26,7 +26,6 @@ #include "quadd.h" #include "hrt.h" #include "comm.h" -#include "debug.h" static void make_sample(struct quadd_hrt_ctx *hrt_ctx, pid_t pid, unsigned long vm_size, diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/main.c b/kernel/nvidia/drivers/misc/tegra-profiler/main.c index d1c83253f5..b21590c207 100644 --- a/kernel/nvidia/drivers/misc/tegra-profiler/main.c +++ b/kernel/nvidia/drivers/misc/tegra-profiler/main.c @@ -1,7 +1,7 @@ /* * drivers/misc/tegra-profiler/main.c * - * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2013-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -28,7 +28,6 @@ #include "hrt.h" #include "comm.h" #include "mmap.h" -#include "debug.h" #include "tegra.h" #include "power_clk.h" #include "auth.h" @@ -193,6 +192,12 @@ validate_freq(unsigned int freq) return freq >= 100 && freq <= 100000; } +static inline bool +validate_clk_freq(unsigned int freq) +{ + return freq > 0 && freq <= 1000; +} + static int set_parameters_for_cpu(struct quadd_pmu_setup_for_cpu *params) { @@ -366,6 +371,11 @@ set_parameters(struct quadd_parameters *p) if (ctx.mode_is_sampling && !validate_freq(p->freq)) return -EINVAL; + if (p->power_rate_freq != 0 && !validate_clk_freq(p->power_rate_freq)) + return -EINVAL; + if (p->ma_freq != 0 && !validate_clk_freq(p->ma_freq)) + return -EINVAL; + ctx.exclude_user = extra & QUADD_PARAM_EXTRA_EXCLUDE_USER ? 
1 : 0; ctx.exclude_kernel = @@ -748,7 +758,7 @@ static inline void pmu_deinit(void) int quadd_late_init(void) { - int i, nr_events, nr_ctrs, err; + int nr_events, nr_ctrs, err; unsigned int raw_event_mask; struct quadd_event *events; struct source_info *pmu_info; @@ -784,13 +794,6 @@ int quadd_late_init(void) pmu_info->nr_supp_events = nr_events; pmu_info->raw_event_mask = raw_event_mask; pmu_info->nr_ctrs = nr_ctrs; - - pr_debug("CPU: %d PMU: events: %d, raw mask: %#x\n", - cpuid, nr_events, raw_event_mask); - - for (i = 0; i < nr_events; i++) - pr_debug("CPU: %d PMU event: %s\n", cpuid, - quadd_get_hw_event_str(events[i].id)); } } diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/power_clk.c b/kernel/nvidia/drivers/misc/tegra-profiler/power_clk.c index 161bd7f313..429f0b3eb2 100644 --- a/kernel/nvidia/drivers/misc/tegra-profiler/power_clk.c +++ b/kernel/nvidia/drivers/misc/tegra-profiler/power_clk.c @@ -1,7 +1,7 @@ /* * drivers/misc/tegra-profiler/power_clk.c * - * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2013-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -34,7 +34,6 @@ #include "quadd.h" #include "hrt.h" #include "comm.h" -#include "debug.h" #define PCLK_MAX_VALUES 32 diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/tegra23x_pmu_dsu.c b/kernel/nvidia/drivers/misc/tegra-profiler/tegra23x_pmu_dsu.c index a9a30dfdbe..c52a9529af 100644 --- a/kernel/nvidia/drivers/misc/tegra-profiler/tegra23x_pmu_dsu.c +++ b/kernel/nvidia/drivers/misc/tegra-profiler/tegra23x_pmu_dsu.c @@ -1,7 +1,7 @@ /* * drivers/misc/tegra-profiler/tegra23x_pmu_dsu.c * - * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -35,7 +35,6 @@ #include "tegra23x_pmu_dsu.h" #include "quadd.h" -#include "debug.h" #define CPU_CYCLES 0x11 @@ -131,7 +130,7 @@ static void tegra23x_pmu_dsu_disable(void) return; pmcr = __dsu_pmu_read_pmcr(); - pmcr &= ~CLUSTERPMCR_E; + pmcr &= ~((u32)CLUSTERPMCR_E); __dsu_pmu_write_pmcr(pmcr); memset(unit->cntrs, 0, sizeof(unit->cntrs)); diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/tegra23x_pmu_scf.c b/kernel/nvidia/drivers/misc/tegra-profiler/tegra23x_pmu_scf.c index 21eed58657..4206ad0bbd 100644 --- a/kernel/nvidia/drivers/misc/tegra-profiler/tegra23x_pmu_scf.c +++ b/kernel/nvidia/drivers/misc/tegra-profiler/tegra23x_pmu_scf.c @@ -1,7 +1,7 @@ /* * drivers/misc/tegra-profiler/tegra23x_pmu_scf.c * - * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -37,7 +37,6 @@ #include "tegra23x_pmu_scf.h" #include "quadd.h" -#include "debug.h" #define BUS_ACCESS 0x19 #define BUS_CYCLES 0x1D diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/uncore_events.c b/kernel/nvidia/drivers/misc/tegra-profiler/uncore_events.c index 605e6d9ebf..866390c9bd 100644 --- a/kernel/nvidia/drivers/misc/tegra-profiler/uncore_events.c +++ b/kernel/nvidia/drivers/misc/tegra-profiler/uncore_events.c @@ -1,7 +1,7 @@ /* * drivers/misc/tegra-profiler/uncore_events.c * - * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -116,7 +116,7 @@ put_sample(const struct quadd_event_data *events, int nr_events, u64 ts) u32 value = (u32)events[i].delta; if (value > 0) { - s->events_flags |= 1 << events[i].out_idx; + s->events_flags |= 1U << events[i].out_idx; events_extra[nr_positive++] = value; } } diff --git a/kernel/nvidia/drivers/misc/tegra-profiler/version.h b/kernel/nvidia/drivers/misc/tegra-profiler/version.h index ba2f81d318..4427568ec7 100644 --- a/kernel/nvidia/drivers/misc/tegra-profiler/version.h +++ b/kernel/nvidia/drivers/misc/tegra-profiler/version.h @@ -1,7 +1,7 @@ /* * drivers/misc/tegra-profiler/version.h * - * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2013-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -17,7 +17,7 @@ #ifndef __QUADD_VERSION_H #define __QUADD_VERSION_H -#define QUADD_MODULE_VERSION "1.150" +#define QUADD_MODULE_VERSION "1.151" #define QUADD_MODULE_BRANCH "Dev" #endif /* __QUADD_VERSION_H */ diff --git a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/Makefile b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/Makefile index c6e8f36d6e..329b8e35ad 100644 --- a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/Makefile +++ b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/Makefile @@ -1,4 +1,4 @@ -# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -12,18 +12,19 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
+ifneq ($(CONFIG_TEGRA_LINUX_SAFETY),y) + OSI_COMMON := nvethernetrm/osi/common OSI_CORE := nvethernetrm/osi/core OSI_DMA := nvethernetrm/osi/dma obj-$(CONFIG_NVETHERNET) += nvethernet.o -ccflags-y += -DLINUX_IVC -DUPDATED_PAD_CAL \ - -I$(srctree.nvidia)/drivers/net/ethernet/nvidia/nvethernet/nvethernetrm/include \ +# These CFLAGS must not be shared/used in OSI. These are local to Linux +ccflags-y += -DLINUX_OS -DNET30 -DNVPKCS_MACSEC -DLINUX_IVC -DUPDATED_PAD_CAL \ + -I$(srctree.nvidia)/drivers/net/ethernet/nvidia/nvethernet/nvethernetrm/include \ -I$(srctree.nvidia)/drivers/net/ethernet/nvidia/nvethernet/nvethernetrm/osi/common/include -ccflags-y += -DMACSEC_SUPPORT -DNET30 -DMACSEC_DEBUG -DOSI_DEBUG -DHSI_SUPPORT - nvethernet-objs:= ether_linux.o \ osd.o \ ethtool.o \ @@ -56,4 +57,8 @@ nvethernet-objs:= ether_linux.o \ $(OSI_CORE)/vlan_filter.o \ $(OSI_CORE)/debug.o +include $(srctree.nvidia)/drivers/net/ethernet/nvidia/nvethernet/nvethernetrm/include/config.tmk + nvethernet-$(CONFIG_NVETHERNET_SELFTESTS) += selftests.o + +endif diff --git a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_export.h b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_export.h new file mode 100644 index 0000000000..02c459fed4 --- /dev/null +++ b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_export.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. 
If not, see . + */ + +#ifndef ETHER_EXPORT_H +#define ETHER_EXPORT_H + +#include +/** + * @addtogroup private IOCTL related info + * + * @brief MACRO are defined for driver supported + * private IOCTLs. These IOCTLs can be called using + * SIOCDEVPRIVATE custom ioctl command. + * @{ + */ +/** To set HW AVB configuration from user application */ +#define ETHER_AVB_ALGORITHM 27 +/** To get current configuration in HW */ +#define ETHER_GET_AVB_ALGORITHM 46 +/** To configure EST(802.1 bv) in HW */ +#define ETHER_CONFIG_EST 49 +/** For configure FPE (802.1 bu + 803.2 br) in HW */ +#define ETHER_CONFIG_FPE 50 +/* FRP command */ +#define ETHER_CONFIG_FRP_CMD 51 +/** To configure L2 Filter (Only with Ethernet virtualization) */ +#define ETHER_L2_ADDR 61 +/** @} */ + +/** + * @brief Structure for L2 filters input + */ +struct ether_l2_filter { + /** indicates enable(1)/disable(0) L2 filter */ + + nveu32_t en_dis; + /** Indicates the index of the filter to be modified. + * Filter index must be between 0 - 31 */ + nveu32_t index; + /** Ethernet MAC address to be added */ + nveu8_t mac_address[OSI_ETH_ALEN]; +}; + +/** + * @brief struct ether_exported_ifr_data - Private data of struct ifreq + */ +struct ether_exported_ifr_data { + /** Flags used for specific ioctl - like enable/disable */ + nveu32_t if_flags; + /** qinx: Queue index to be used for certain ioctls */ + nveu32_t qinx; + /** The private ioctl command number */ + nveu32_t ifcmd; + /** Used to query the connected link speed */ + nveu32_t connected_speed; + /** The return value of IOCTL handler func */ + nve32_t command_error; + /** IOCTL cmd specific structure pointer */ + void *ptr; +}; +#endif /* ETHER_EXPORT_H */ diff --git a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c index 0de9591bff..41ac5223c3 100644 --- a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c +++ 
b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -146,7 +146,7 @@ static irqreturn_t ether_common_isr_thread(int irq, void *data) /* Called from ether_hsi_work */ if (osi_core->hsi.report_err && irq == 0) { osi_core->hsi.report_err = OSI_DISABLE; - for (i = 0; i < HSI_MAX_MAC_ERROR_CODE; i++) { + for (i = 0; i < OSI_HSI_MAX_MAC_ERROR_CODE; i++) { if (osi_core->hsi.err_code[i] > 0) { error_report.error_code = osi_core->hsi.err_code[i]; @@ -189,7 +189,7 @@ static irqreturn_t ether_common_isr_thread(int irq, void *data) /* Called from interrupt handler */ if (osi_core->hsi.report_err && irq != 0) { - for (i = 0; i < HSI_MAX_MAC_ERROR_CODE; i++) { + for (i = 0; i < OSI_HSI_MAX_MAC_ERROR_CODE; i++) { if (osi_core->hsi.err_code[i] > 0 && osi_core->hsi.report_count_err[i] == OSI_ENABLE) { error_report.error_code = @@ -760,6 +760,11 @@ int ether_conf_eee(struct ether_priv_data *pdata, unsigned int tx_lpi_enable) unsigned int enable = tx_lpi_enable; struct osi_ioctl ioctl_data = {}; + if (!phydev) { + dev_err(pdata->dev, "%s() phydev is NULL\n", __func__); + return -ENODEV; + } + if (tx_lpi_enable) { /* phy_init_eee() returns 0 if EEE is supported by the PHY */ if (phy_init_eee(phydev, @@ -884,6 +889,13 @@ static inline void set_speed_work_func(struct work_struct *work) return; } + if (atomic_read(&pdata->set_speed_ref_cnt) == 1) { + /* set_speed already going on either from workq or interrupt */ + return; + } + + atomic_set(&pdata->set_speed_ref_cnt, OSI_ENABLE); + /* Speed will be overwritten as per the PHY interface mode */ speed = phydev->speed; /* MAC and XFI speed should match in XFI mode */ @@ -903,6 +915,7 @@ static inline void 
set_speed_work_func(struct work_struct *work) netdev_dbg(dev, "Retry set speed\n"); schedule_delayed_work(&pdata->set_speed_work, msecs_to_jiffies(1000)); + atomic_set(&pdata->set_speed_ref_cnt, OSI_DISABLE); return; } @@ -918,6 +931,8 @@ static inline void set_speed_work_func(struct work_struct *work) } pdata->eee_active = ether_conf_eee(pdata, eee_enable); netif_carrier_on(dev); + + atomic_set(&pdata->set_speed_ref_cnt, OSI_DISABLE); } static void ether_en_dis_monitor_clks(struct ether_priv_data *pdata, @@ -1073,8 +1088,8 @@ static void ether_adjust_link(struct net_device *dev) if (!pdata->oldlink) { new_state = 1; pdata->oldlink = 1; - val = pdata->osi_core->xstats.link_connect_count; - pdata->osi_core->xstats.link_connect_count = + val = pdata->xstats.link_connect_count; + pdata->xstats.link_connect_count = osi_update_stats_counter(val, 1UL); } } else if (pdata->oldlink) { @@ -1082,8 +1097,8 @@ static void ether_adjust_link(struct net_device *dev) pdata->oldlink = 0; pdata->speed = 0; pdata->oldduplex = -1; - val = pdata->osi_core->xstats.link_disconnect_count; - pdata->osi_core->xstats.link_disconnect_count = + val = pdata->xstats.link_disconnect_count; + pdata->xstats.link_disconnect_count = osi_update_stats_counter(val, 1UL); ether_en_dis_monitor_clks(pdata, OSI_DISABLE); } else { @@ -1266,7 +1281,6 @@ static irqreturn_t ether_tx_chan_isr(int irq, void *data) struct ether_tx_napi *tx_napi = (struct ether_tx_napi *)data; struct ether_priv_data *pdata = tx_napi->pdata; struct osi_dma_priv_data *osi_dma = pdata->osi_dma; - struct osi_core_priv_data *osi_core = pdata->osi_core; unsigned int chan = tx_napi->chan; unsigned long flags; unsigned long val; @@ -1277,8 +1291,8 @@ static irqreturn_t ether_tx_chan_isr(int irq, void *data) OSI_DMA_INTR_DISABLE); raw_spin_unlock_irqrestore(&pdata->rlock, flags); - val = osi_core->xstats.tx_normal_irq_n[chan]; - osi_core->xstats.tx_normal_irq_n[chan] = + val = pdata->xstats.tx_normal_irq_n[chan]; + 
pdata->xstats.tx_normal_irq_n[chan] = osi_update_stats_counter(val, 1U); if (likely(napi_schedule_prep(&tx_napi->napi))) { @@ -1315,7 +1329,6 @@ static irqreturn_t ether_rx_chan_isr(int irq, void *data) struct ether_rx_napi *rx_napi = (struct ether_rx_napi *)data; struct ether_priv_data *pdata = rx_napi->pdata; struct osi_dma_priv_data *osi_dma = pdata->osi_dma; - struct osi_core_priv_data *osi_core = pdata->osi_core; unsigned int chan = rx_napi->chan; unsigned long val, flags; @@ -1325,8 +1338,8 @@ static irqreturn_t ether_rx_chan_isr(int irq, void *data) OSI_DMA_INTR_DISABLE); raw_spin_unlock_irqrestore(&pdata->rlock, flags); - val = osi_core->xstats.rx_normal_irq_n[chan]; - osi_core->xstats.rx_normal_irq_n[chan] = + val = pdata->xstats.rx_normal_irq_n[chan]; + pdata->xstats.rx_normal_irq_n[chan] = osi_update_stats_counter(val, 1U); if (likely(napi_schedule_prep(&rx_napi->napi))) { @@ -1427,32 +1440,6 @@ static void ether_free_irqs(struct ether_priv_data *pdata) } } -/** - * @brief IVC ISR Routine - * - * Algorithm: IVC routine to handle common interrupt. - * 1) Verify if IVC channel is readable - * 2) Read IVC msg - * 3) Schedule ivc_work - * - * @param[in] irq: IRQ number. - * @param[in] data: Private data from ISR. - * - * @note MAC and PHY need to be initialized. - * - * @retval IRQ_HANDLED on success - * @retval IRQ_NONE on failure. - */ -static irqreturn_t ether_ivc_irq(int irq, void *data) -{ - struct ether_priv_data *pdata = (struct ether_priv_data *)data; - struct ether_ivc_ctxt *ictxt = &pdata->ictxt; - - complete(&ictxt->msg_complete); - - return IRQ_HANDLED; -} - /** * @brief Start IVC, initializes IVC. 
* @@ -1463,23 +1450,11 @@ static irqreturn_t ether_ivc_irq(int irq, void *data) static void ether_start_ivc(struct ether_priv_data *pdata) { - int ret; struct ether_ivc_ctxt *ictxt = &pdata->ictxt; if (ictxt->ivck != NULL && !ictxt->ivc_state) { tegra_hv_ivc_channel_reset(ictxt->ivck); - - ret = devm_request_irq(pdata->dev, ictxt->ivck->irq, - ether_ivc_irq, - 0, dev_name(pdata->dev), pdata); - if (ret) { - dev_err(pdata->dev, - "Unable to request irq(%d)\n", ictxt->ivck->irq); - tegra_hv_ivc_unreserve(ictxt->ivck); - return; - } ictxt->ivc_state = 1; - // initialize - mutex_init(&ictxt->ivck_lock); + raw_spin_lock_init(&ictxt->ivck_lock); } } @@ -1496,7 +1471,6 @@ static void ether_stop_ivc(struct ether_priv_data *pdata) struct ether_ivc_ctxt *ictxt = &pdata->ictxt; if (ictxt->ivck != NULL) { tegra_hv_ivc_unreserve(ictxt->ivck); - devm_free_irq(pdata->dev, ictxt->ivck->irq, pdata); ictxt->ivc_state = 0; } } @@ -1552,7 +1526,6 @@ static int ether_init_ivc(struct ether_priv_data *pdata) dev_info(dev, "Reserved IVC channel #%u - frame_size=%d irq %d\n", id, ictxt->ivck->frame_size, ictxt->ivck->irq); osi_core->osd_ops.ivc_send = osd_ivc_send_cmd; - init_completion(&ictxt->msg_complete); ether_start_ivc(pdata); return 0; } @@ -2471,7 +2444,11 @@ static int ether_mdio_register(struct ether_priv_data *pdata) new_bus->name = "nvethernet_mdio_bus"; new_bus->read = ether_mdio_read; new_bus->write = ether_mdio_write; - snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + ret = snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + if (ret < 0) { + dev_err(dev, "%s:encoding error", __func__); + goto exit; + } new_bus->priv = pdata->ndev; new_bus->parent = dev; @@ -2586,9 +2563,7 @@ static int ether_open(struct net_device *dev) } /* initialize MAC/MTL/DMA Common registers */ - ret = osi_hw_core_init(pdata->osi_core, - pdata->hw_feat.tx_fifo_size, - pdata->hw_feat.rx_fifo_size); + ret = osi_hw_core_init(pdata->osi_core); if (ret < 0) { dev_err(pdata->dev, 
"%s: failed to initialize MAC HW core with reason %d\n", @@ -2673,15 +2648,6 @@ static int ether_open(struct net_device *dev) /* Init EEE configuration */ ether_init_eee_params(pdata); - /* Start the MAC */ - ioctl_data.cmd = OSI_CMD_START_MAC; - ret = osi_handle_ioctl(pdata->osi_core, &ioctl_data); - if (ret < 0) { - dev_err(&dev->dev, - "%s: failed to start MAC %d\n", - __func__, ret); - goto err_r_irq; - } /* start PHY */ phy_start(pdata->phydev); @@ -2750,7 +2716,7 @@ static int ether_open(struct net_device *dev) * * Algorithm: This routine clears the following sw stats structures. * 1) struct osi_mmc_counters - * 2) struct osi_xtra_stat_counters + * 2) struct ether_xtra_stat_counters * 3) struct osi_xtra_dma_stat_counters * 4) struct osi_pkt_err_stats * @@ -2764,8 +2730,8 @@ static inline void ether_reset_stats(struct ether_priv_data *pdata) struct osi_dma_priv_data *osi_dma = pdata->osi_dma; memset(&osi_core->mmc, 0U, sizeof(struct osi_mmc_counters)); - memset(&osi_core->xstats, 0U, - sizeof(struct osi_xtra_stat_counters)); + memset(&pdata->xstats, 0U, + sizeof(struct ether_xtra_stat_counters)); memset(&osi_dma->dstats, 0U, sizeof(struct osi_xtra_dma_stat_counters)); memset(&osi_dma->pkt_err_stats, 0U, sizeof(struct osi_pkt_err_stats)); @@ -2822,7 +2788,6 @@ static inline void ether_delete_l2_filter(struct ether_priv_data *pdata) if (ret < 0) { dev_err(pdata->dev, "failed to delete L2 filter index = %d\n", i); - mutex_unlock(&pdata->rx_mode_lock); return; } } @@ -2956,7 +2921,7 @@ static int ether_close(struct net_device *ndev) /* stop tx ts pending SKB workqueue and remove skb nodes */ ether_flush_tx_ts_skb_list(pdata); - cancel_work_sync(&pdata->set_rx_mode_work); + tasklet_kill(&pdata->lane_restart_task); ether_stop_ivc(pdata); @@ -2979,15 +2944,6 @@ static int ether_close(struct net_device *ndev) if (pdata->osi_core->mac == OSI_MAC_HW_MGBE) pm_runtime_put_sync(pdata->dev); -#ifdef MACSEC_SUPPORT -#ifdef DEBUG_MACSEC - ret = 
macsec_close(pdata->macsec_pdata); - if (ret < 0) { - dev_err(pdata->dev, "Failed to close macsec"); - } -#endif -#endif /* MACSEC_SUPPORT */ - /* Reset stats since interface is going down */ ether_reset_stats(pdata); @@ -3152,7 +3108,6 @@ static int ether_tx_swcx_alloc(struct ether_priv_data *pdata, if (unlikely(skb_vlan_tag_present(skb))) { tx_pkt_cx->vtag_id = skb_vlan_tag_get(skb); - tx_pkt_cx->vtag_id |= (skb->priority << VLAN_PRIO_SHIFT); tx_pkt_cx->flags |= OSI_PKT_CX_VLAN; } @@ -3334,11 +3289,10 @@ static unsigned short ether_select_queue(struct net_device *dev, struct osi_core_priv_data *osi_core = pdata->osi_core; unsigned short txqueue_select = 0; unsigned int i, mtlq; - u16 vlan_tci; unsigned int priority = skb->priority; - if (vlan_get_tag(skb, &vlan_tci) == 0) { - priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + if (skb_vlan_tag_present(skb)) { + priority = skb_vlan_tag_get_prio(skb); } for (i = 0; i < osi_core->num_mtl_queues; i++) { @@ -3623,24 +3577,24 @@ static int ether_prepare_uc_list(struct net_device *dev, } /** - * @brief Work Queue function to call rx mode. + * @brief This function is used to set RX mode. * - * @param[in] work: work structure + * Algorithm: Based on Network interface flag, MAC registers are programmed to + * set mode. + * + * @param[in] dev - pointer to net_device structure. * * @note MAC and PHY need to be initialized. 
*/ -static inline void set_rx_mode_work_func(struct work_struct *work) +void ether_set_rx_mode(struct net_device *dev) { - struct ether_priv_data *pdata = container_of(work, - struct ether_priv_data, set_rx_mode_work); + struct ether_priv_data *pdata = netdev_priv(dev); struct osi_core_priv_data *osi_core = pdata->osi_core; /* store last call last_uc_filter_index in temporary variable */ struct osi_ioctl ioctl_data = {}; - struct net_device *dev = pdata->ndev; unsigned int mac_addr_idx = ETHER_MAC_ADDRESS_INDEX + 1U, i; int ret = -1; - mutex_lock(&pdata->rx_mode_lock); memset(&ioctl_data.l2_filter, 0x0, sizeof(struct osi_filter)); if ((dev->flags & IFF_PROMISC) == IFF_PROMISC) { if (pdata->promisc_mode == OSI_ENABLE) { @@ -3659,8 +3613,6 @@ static inline void set_rx_mode_work_func(struct work_struct *work) dev_warn(pdata->dev, "Promiscuous mode not supported\n"); } - - mutex_unlock(&pdata->rx_mode_lock); return; } else if ((dev->flags & IFF_ALLMULTI) == IFF_ALLMULTI) { ioctl_data.l2_filter.oper_mode = (OSI_OPER_EN_ALLMULTI | @@ -3672,8 +3624,6 @@ static inline void set_rx_mode_work_func(struct work_struct *work) if (ret < 0) { dev_err(pdata->dev, "Setting All Multicast allow mode failed\n"); } - - mutex_unlock(&pdata->rx_mode_lock); return; } else if (!netdev_mc_empty(dev)) { if (ether_prepare_mc_list(dev, &ioctl_data, &mac_addr_idx) != 0) { @@ -3708,7 +3658,6 @@ static inline void set_rx_mode_work_func(struct work_struct *work) if (ret < 0) { dev_err(pdata->dev, "failed to delete L2 filter index = %d\n", i); - mutex_unlock(&pdata->rx_mode_lock); return; } } @@ -3728,28 +3677,9 @@ static inline void set_rx_mode_work_func(struct work_struct *work) if (ret < 0) { dev_err(pdata->dev, "failed to set operation mode\n"); } - - mutex_unlock(&pdata->rx_mode_lock); return; } -/** - * @brief This function is used to set RX mode. - * - * Algorithm: Based on Network interface flag, MAC registers are programmed to - * set mode. 
- * - * @param[in] dev - pointer to net_device structure. - * - * @note MAC and PHY need to be initialized. - */ -void ether_set_rx_mode(struct net_device *dev) -{ - struct ether_priv_data *pdata = netdev_priv(dev); - - schedule_work(&pdata->set_rx_mode_work); -} - /** * @brief Function to handle PHY read private IOCTL * @@ -4016,15 +3946,12 @@ static int ether_change_mtu(struct net_device *ndev, int new_mtu) osi_dma->mtu = new_mtu; #ifdef MACSEC_SUPPORT - /* Macsec is supported, reduce MTU */ - if ((osi_core->mac == OSI_MAC_HW_EQOS && osi_core->mac_ver == OSI_EQOS_MAC_5_30) || + /* Macsec is not supported or not enabled in DT */ + if (!pdata->macsec_pdata) { + netdev_info(pdata->ndev, "Macsec not supported or not enabled in DT\n"); + } else if ((osi_core->mac == OSI_MAC_HW_EQOS && osi_core->mac_ver == OSI_EQOS_MAC_5_30) || (osi_core->mac == OSI_MAC_HW_MGBE && osi_core->mac_ver == OSI_MGBE_MAC_3_10)) { - ret = osi_macsec_update_mtu(osi_core, new_mtu); - if (ret < 0) { - dev_err(pdata->dev, "Failed to set MACSEC MTU to %d\n", - new_mtu); - return -EINVAL; - } + /* Macsec is supported, reduce MTU */ ndev->mtu -= MACSEC_TAG_ICV_LEN; netdev_info(pdata->ndev, "Macsec: Reduced MTU: %d Max: %d\n", ndev->mtu, ndev->max_mtu); @@ -4324,11 +4251,10 @@ static enum hrtimer_restart ether_tx_usecs_hrtimer(struct hrtimer *data) struct ether_tx_napi *tx_napi = container_of(data, struct ether_tx_napi, tx_usecs_timer); struct ether_priv_data *pdata = tx_napi->pdata; - struct osi_core_priv_data *osi_core = pdata->osi_core; unsigned long val; - val = osi_core->xstats.tx_usecs_swtimer_n[tx_napi->chan]; - osi_core->xstats.tx_usecs_swtimer_n[tx_napi->chan] = + val = pdata->xstats.tx_usecs_swtimer_n[tx_napi->chan]; + pdata->xstats.tx_usecs_swtimer_n[tx_napi->chan] = osi_update_stats_counter(val, 1U); atomic_set(&pdata->tx_napi[tx_napi->chan]->tx_usecs_timer_armed, @@ -4674,69 +4600,60 @@ static int ether_get_mac_address(struct ether_priv_data *pdata) unsigned int mac_addr_idx = 0x0; int 
ret = 0; - if (!osi_core->pre_si) { - /** For all new Platforms, ethernet DT node must have - * "nvidia,mac-addr-idx" property which give MAC address - * index of ethernet controller. - * - * - Algorithm: MAC address index for a functional driver is - * known from platform dts file. - * - * For example: - * if there is MGBE controller DT node with index 8 MGBE, - * MAC address is at /chosen/nvidia,ether-mac8 - */ - if ((pdata->osi_core->mac_ver > OSI_EQOS_MAC_5_10) || - (pdata->osi_core->mac == OSI_MAC_HW_MGBE)) { - ret = of_property_read_u32(np, - "nvidia,mac-addr-idx", - &mac_addr_idx); - if (ret < 0) { - dev_err(dev, - "Ethernet MAC index missing\n"); - /* TODO Must return error if index is not - * present in ethernet dt node - * which is having status "okay". - */ - } - - offset = mac_addr_idx; - sprintf(str_mac_address, "nvidia,ether-mac%d", offset); - } - - ret = ether_get_mac_address_dtb("/chosen", str_mac_address, - mac_addr); - /* If return value is valid update eth_mac_addr */ - if (ret == 0) { - eth_mac_addr = mac_addr; + /** For all new Platforms, ethernet DT node must have + * "nvidia,mac-addr-idx" property which give MAC address + * index of ethernet controller. + * + * - Algorithm: MAC address index for a functional driver is + * known from platform dts file. + * + * For example: + * if there is MGBE controller DT node with index 8 MGBE, + * MAC address is at /chosen/nvidia,ether-mac8 + */ + if ((pdata->osi_core->mac_ver > OSI_EQOS_MAC_5_10) || + (pdata->osi_core->mac == OSI_MAC_HW_MGBE)) { + ret = of_property_read_u32(np, "nvidia,mac-addr-idx", + &mac_addr_idx); + if (ret < 0) { + dev_err(dev, "Ethernet MAC index missing\n"); + /* TODO Must return error if index is not + * present in ethernet dt node + * which is having status "okay". 
+ */ } - /* if chosen nodes are not present for platform */ - if (IS_ERR_OR_NULL(eth_mac_addr)) { - /* Read MAC address using default ethernet property - * upstream driver should have only this call to get - * MAC address - */ - eth_mac_addr = of_get_mac_address(np); + offset = mac_addr_idx; + sprintf(str_mac_address, "nvidia,ether-mac%d", offset); + } - if (IS_ERR_OR_NULL(eth_mac_addr)) { - dev_err(dev, "No MAC address in local DT!\n"); - return -EINVAL; - } - } + ret = ether_get_mac_address_dtb("/chosen", str_mac_address, mac_addr); + /* If return value is valid update eth_mac_addr */ + if (ret == 0) { + eth_mac_addr = mac_addr; + } - /* If neither chosen node nor kernel supported dt strings are - * present in platform device tree. + /* if chosen nodes are not present for platform */ + if (IS_ERR_OR_NULL(eth_mac_addr)) { + /* Read MAC address using default ethernet property + * upstream driver should have only this call to get + * MAC address */ - if (!(is_valid_ether_addr(eth_mac_addr)) || - IS_ERR_OR_NULL(eth_mac_addr)) { - dev_err(dev, "Bad mac address exiting\n"); + eth_mac_addr = of_get_mac_address(np); + + if (IS_ERR_OR_NULL(eth_mac_addr)) { + dev_err(dev, "No MAC address in local DT!\n"); return -EINVAL; } - } else { - ndev->addr_assign_type = NET_ADDR_RANDOM; - eth_random_addr(mac_addr); - eth_mac_addr = mac_addr; + } + + /* If neither chosen node nor kernel supported dt strings are + * present in platform device tree. 
+ */ + if (!(is_valid_ether_addr(eth_mac_addr)) || + IS_ERR_OR_NULL(eth_mac_addr)) { + dev_err(dev, "Bad mac address exiting\n"); + return -EINVAL; } /* Found a valid mac address */ @@ -4923,31 +4840,31 @@ static int ether_get_mgbe_clks(struct ether_priv_data *pdata) struct device *dev = pdata->dev; int ret; - pdata->rx_m_clk = devm_clk_get(dev, "rx_input_m"); + pdata->rx_m_clk = devm_clk_get(dev, "rx-input-m"); if (IS_ERR(pdata->rx_m_clk)) { ret = PTR_ERR(pdata->rx_m_clk); - dev_err(dev, "failed to get rx_input_m\n"); + dev_err(dev, "failed to get rx-input-m\n"); goto err_rx_m; } - pdata->rx_pcs_m_clk = devm_clk_get(dev, "rx_pcs_m"); + pdata->rx_pcs_m_clk = devm_clk_get(dev, "rx-pcs-m"); if (IS_ERR(pdata->rx_pcs_m_clk)) { ret = PTR_ERR(pdata->rx_pcs_m_clk); - dev_err(dev, "failed to get rx_pcs_m clk\n"); + dev_err(dev, "failed to get rx-pcs-m clk\n"); goto err_rx_pcs_m; } - pdata->rx_pcs_input_clk = devm_clk_get(dev, "rx_pcs_input"); + pdata->rx_pcs_input_clk = devm_clk_get(dev, "rx-pcs-input"); if (IS_ERR(pdata->rx_pcs_input_clk)) { ret = PTR_ERR(pdata->rx_pcs_input_clk); - dev_err(dev, "failed to get rx_pcs_input clk\n"); + dev_err(dev, "failed to get rx-pcs-input clk\n"); goto err_rx_pcs_input; } - pdata->rx_pcs_clk = devm_clk_get(dev, "rx_pcs"); + pdata->rx_pcs_clk = devm_clk_get(dev, "rx-pcs"); if (IS_ERR(pdata->rx_pcs_clk)) { ret = PTR_ERR(pdata->rx_pcs_clk); - dev_err(dev, "failed to get rx_pcs clk\n"); + dev_err(dev, "failed to get rx-pcs clk\n"); goto err_rx_pcs; } @@ -4958,17 +4875,17 @@ static int ether_get_mgbe_clks(struct ether_priv_data *pdata) goto err_tx; } - pdata->tx_pcs_clk = devm_clk_get(dev, "tx_pcs"); + pdata->tx_pcs_clk = devm_clk_get(dev, "tx-pcs"); if (IS_ERR(pdata->tx_pcs_clk)) { ret = PTR_ERR(pdata->tx_pcs_clk); - dev_err(dev, "failed to get tx_pcs clk\n"); + dev_err(dev, "failed to get tx-pcs clk\n"); goto err_tx_pcs; } - pdata->mac_div_clk = devm_clk_get(dev, "mac_divider"); + pdata->mac_div_clk = devm_clk_get(dev, "mac-divider"); if 
(IS_ERR(pdata->mac_div_clk)) { ret = PTR_ERR(pdata->mac_div_clk); - dev_err(dev, "failed to get mac_divider clk\n"); + dev_err(dev, "failed to get mac-divider clk\n"); goto err_mac_div; } @@ -4979,31 +4896,31 @@ static int ether_get_mgbe_clks(struct ether_priv_data *pdata) goto err_mac; } - pdata->eee_pcs_clk = devm_clk_get(dev, "eee_pcs"); + pdata->eee_pcs_clk = devm_clk_get(dev, "eee-pcs"); if (IS_ERR(pdata->eee_pcs_clk)) { ret = PTR_ERR(pdata->eee_pcs_clk); - dev_err(dev, "failed to get eee_pcs clk\n"); + dev_err(dev, "failed to get eee-pcs clk\n"); goto err_eee_pcs; } - pdata->app_clk = devm_clk_get(dev, "app"); + pdata->app_clk = devm_clk_get(dev, "mgbe"); if (IS_ERR(pdata->app_clk)) { ret = PTR_ERR(pdata->app_clk); - dev_err(dev, "failed to get app clk\n"); + dev_err(dev, "failed to get mgbe clk\n"); goto err_app; } - pdata->ptp_ref_clk = devm_clk_get(dev, "ptp_ref"); + pdata->ptp_ref_clk = devm_clk_get(dev, "ptp-ref"); if (IS_ERR(pdata->ptp_ref_clk)) { ret = PTR_ERR(pdata->ptp_ref_clk); - dev_err(dev, "failed to get ptp_ref clk\n"); + dev_err(dev, "failed to get ptp-ref clk\n"); goto err_ptp_ref; } - pdata->rx_input_clk = devm_clk_get(dev, "rx_input"); + pdata->rx_input_clk = devm_clk_get(dev, "rx-input"); if (IS_ERR(pdata->rx_input_clk)) { ret = PTR_ERR(pdata->rx_input_clk); - dev_err(dev, "failed to get rx_input clk\n"); + dev_err(dev, "failed to get rx-input clk\n"); goto err_rx_input; } @@ -5182,7 +5099,7 @@ static int ether_configure_car(struct platform_device *pdev, /* get MAC reset */ if (!pdata->skip_mac_reset) { - pdata->mac_rst = devm_reset_control_get(&pdev->dev, "mac_rst"); + pdata->mac_rst = devm_reset_control_get(&pdev->dev, "mac"); if (IS_ERR_OR_NULL(pdata->mac_rst)) { if (PTR_ERR(pdata->mac_rst) != -EPROBE_DEFER) dev_err(&pdev->dev, "failed to get MAC rst\n"); @@ -5192,7 +5109,7 @@ static int ether_configure_car(struct platform_device *pdev, if (osi_core->mac == OSI_MAC_HW_MGBE) { pdata->xpcs_rst = devm_reset_control_get(&pdev->dev, - 
"xpcs_rst"); + "pcs"); if (IS_ERR_OR_NULL(pdata->xpcs_rst)) { dev_info(&pdev->dev, "failed to get XPCS reset\n"); return PTR_ERR(pdata->xpcs_rst); @@ -5304,7 +5221,7 @@ static int ether_init_plat_resources(struct platform_device *pdev, int ret = 0; /* get base address and remap */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac-base"); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac"); osi_core->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(osi_core->base)) { dev_err(&pdev->dev, "failed to ioremap MAC base address\n"); @@ -5312,8 +5229,7 @@ static int ether_init_plat_resources(struct platform_device *pdev, } if (!tegra_hypervisor_mode) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "hv-base"); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hypervisor"); if (res) { osi_core->hv_base = devm_ioremap_resource(&pdev->dev, res); @@ -5346,8 +5262,7 @@ static int ether_init_plat_resources(struct platform_device *pdev, } if (osi_core->mac == OSI_MAC_HW_MGBE) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "xpcs-base"); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xpcs"); if (res) { osi_core->xpcs_base = devm_ioremap_resource(&pdev->dev, res); @@ -5557,7 +5472,7 @@ static void ether_get_dma_ring_size(struct device *dev, ret = of_property_read_u32(np, "nvidia,dma_rx_ring_sz", &osi_dma->rx_ring_sz); if (ret < 0) { - dev_info(dev, "Failed to read DMA Tx ring size, using default [%d]\n", + dev_info(dev, "Failed to read DMA Rx ring size, using default [%d]\n", default_sz[osi_dma->mac]); osi_dma->rx_ring_sz = default_sz[osi_dma->mac]; } @@ -5591,6 +5506,8 @@ static int ether_parse_dt(struct ether_priv_data *pdata) int ret = -EINVAL; unsigned int i, mtlq, chan, bitmap; unsigned int dt_pad_calibration_enable; + unsigned int dt_pad_auto_cal_pu_offset; + unsigned int dt_pad_auto_cal_pd_offset; /* This variable is for DT entry which should not fail bootup */ int ret_val = 0; @@ 
-6037,6 +5954,44 @@ static int ether_parse_dt(struct ether_priv_data *pdata) osi_core->padctrl.pad_calibration_enable = dt_pad_calibration_enable; } + /* Read pad calibration config reg offset, default 0 */ + ret = of_property_read_u32(np, "nvidia,pad_auto_cal_pu_offset", + &dt_pad_auto_cal_pu_offset); + if (ret < 0) { + dev_info(dev, "missing nvidia,pad_auto_cal_pu_offset, " + "setting default 0\n"); + osi_core->padctrl.pad_auto_cal_pu_offset = 0U; + ret = 0; + } else if (dt_pad_auto_cal_pu_offset > + OSI_PAD_CAL_CONFIG_PD_PU_OFFSET_MAX) { + dev_err(dev, "Error: Invalid dt " + "pad_auto_cal_pu_offset: %u value\n", + dt_pad_auto_cal_pu_offset); + ret = -EINVAL; + goto exit; + } else { + osi_core->padctrl.pad_auto_cal_pu_offset = + dt_pad_auto_cal_pu_offset; + } + ret = of_property_read_u32(np, "nvidia,pad_auto_cal_pd_offset", + &dt_pad_auto_cal_pd_offset); + if (ret < 0) { + dev_info(dev, "missing nvidia,pad_auto_cal_pd_offset, " + "setting default 0\n"); + osi_core->padctrl.pad_auto_cal_pd_offset = 0U; + ret = 0; + } else if (dt_pad_auto_cal_pd_offset > + OSI_PAD_CAL_CONFIG_PD_PU_OFFSET_MAX) { + dev_err(dev, "Error: Invalid dt " + "pad_auto_cal_pu_offset: %u value\n", + dt_pad_auto_cal_pd_offset); + ret = -EINVAL; + goto exit; + } else { + osi_core->padctrl.pad_auto_cal_pd_offset = + dt_pad_auto_cal_pd_offset; + } + pdata->pin = devm_pinctrl_get(dev); if (IS_ERR(pdata->pin)) { dev_err(dev, "DT: missing eqos pinctrl device\n"); @@ -6116,14 +6071,14 @@ static void ether_get_num_dma_chan_mtl_q(struct platform_device *pdev, unsigned int max_chans = 1; int ret = 0; - ret = of_device_is_compatible(np, "nvidia,nveqos"); - if (ret != 0) { + if (of_device_is_compatible(np, "nvidia,nveqos") || + of_device_is_compatible(np, "nvidia,tegra234-eqos")) { *mac = OSI_MAC_HW_EQOS; max_chans = OSI_EQOS_MAX_NUM_CHANS; } - ret = of_device_is_compatible(np, "nvidia,nvmgbe"); - if (ret != 0) { + if (of_device_is_compatible(np, "nvidia,nvmgbe") || + of_device_is_compatible(np, 
"nvidia,tegra234-mgbe")) { *mac = OSI_MAC_HW_MGBE; max_chans = OSI_MGBE_MAX_NUM_CHANS; } @@ -6296,29 +6251,6 @@ static void init_filter_values(struct ether_priv_data *pdata) } } -/** - * @brief Sets whether platform is Pre-silicon or not. - * - * Algorithm: Updates OSI whether respective platform is Pre-silicon or not - * - * @param[in] osi_core: OSI core private data structure - * @param[in] osi_dma: OSI dma private data structure - */ -static inline void tegra_pre_si_platform(struct osi_core_priv_data *osi_core, - struct osi_dma_priv_data *osi_dma) -{ - /* VDK set true for both VDK/uFPGA */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) - if (tegra_platform_is_vdk()) { - osi_core->pre_si = 1; - osi_dma->pre_si = 1; - } - return; -#endif - osi_core->pre_si = 0; - osi_dma->pre_si = 0; -} - /** * @brief ether_init_rss - Init OSI RSS structure * @@ -6439,7 +6371,6 @@ static int ether_probe(struct platform_device *pdev) osi_core->mtu = ndev->mtu; osi_dma->mtu = ndev->mtu; - tegra_pre_si_platform(osi_core, osi_dma); /* Parse the ethernet DT node */ ret = ether_parse_dt(pdata); @@ -6545,6 +6476,12 @@ static int ether_probe(struct platform_device *pdev) ether_tx_usecs_hrtimer; } + ret = register_netdev(ndev); + if (ret < 0) { + dev_err(&pdev->dev, "failed to register netdev\n"); + goto err_netdev; + } + #ifdef MACSEC_SUPPORT ret = macsec_probe(pdata); if (ret < 0) { @@ -6552,7 +6489,7 @@ static int ether_probe(struct platform_device *pdev) goto err_macsec; } else if (ret == 1) { /* Nothing to do, macsec is not supported */ - dev_info(&pdev->dev, "Macsec not supported\n"); + dev_info(&pdev->dev, "Macsec not supported/Not enabled in DT\n"); } else { dev_info(&pdev->dev, "Macsec not enabled\n"); /* Macsec is supported, reduce MTU */ @@ -6562,12 +6499,6 @@ static int ether_probe(struct platform_device *pdev) } #endif /* MACSEC_SUPPORT */ - ret = register_netdev(ndev); - if (ret < 0) { - dev_err(&pdev->dev, "failed to register netdev\n"); - goto err_netdev; - } - /* 
Register sysfs entry */ ret = ether_sysfs_register(pdata); if (ret < 0) { @@ -6600,9 +6531,6 @@ static int ether_probe(struct platform_device *pdev) /* Initialization of delayed workqueue for HSI error reporting */ INIT_DELAYED_WORK(&pdata->ether_hsi_work, ether_hsi_work_func); #endif - mutex_init(&pdata->rx_mode_lock); - /* Initialization of delayed workqueue */ - INIT_WORK(&pdata->set_rx_mode_work, set_rx_mode_work_func); /* Initialization of set speed workqueue */ INIT_DELAYED_WORK(&pdata->set_speed_work, set_speed_work_func); osi_core->hw_feature = &pdata->hw_feat; @@ -6611,6 +6539,9 @@ static int ether_probe(struct platform_device *pdev) pdata->rx_m_enabled = false; pdata->rx_pcs_m_enabled = false; atomic_set(&pdata->tx_ts_ref_cnt, -1); + atomic_set(&pdata->set_speed_ref_cnt, OSI_DISABLE); + tasklet_setup(&pdata->lane_restart_task, + ether_restart_lane_bringup_task); #ifdef ETHER_NVGRO __skb_queue_head_init(&pdata->mq); __skb_queue_head_init(&pdata->fq); @@ -6627,11 +6558,11 @@ static int ether_probe(struct platform_device *pdev) return 0; err_sysfs: - unregister_netdev(ndev); -err_netdev: #ifdef MACSEC_SUPPORT err_macsec: #endif /* MACSEC_SUPPORT */ + unregister_netdev(ndev); +err_netdev: err_dma_mask: ether_disable_clks(pdata); ether_put_clks(pdata); @@ -6713,105 +6644,6 @@ static void ether_shutdown(struct platform_device *pdev) } #ifdef CONFIG_PM -/** - * @brief Ethernet platform driver suspend noirq callback. - * - * Alogorithm: Stops all data queues and PHY if the device - * does not wake capable. Disable TX and NAPI. - * Deinit OSI core, DMA and TX/RX interrupts. - * - * @param[in] dev: Platform device associated with platform driver. - * - * @retval 0 on success - * @retval "negative value" on failure. 
- */ -static int ether_suspend_noirq(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct ether_priv_data *pdata = netdev_priv(ndev); - struct osi_core_priv_data *osi_core = pdata->osi_core; - struct osi_dma_priv_data *osi_dma = pdata->osi_dma; - struct osi_ioctl ioctl_data = {}; - unsigned int i = 0, chan = 0; - int ret; - - if (!netif_running(ndev)) - return 0; - - /* Keep MACSEC to suspend if MACSEC is supported on this platform */ -#ifdef MACSEC_SUPPORT - if ((osi_core->mac == OSI_MAC_HW_EQOS && osi_core->mac_ver == OSI_EQOS_MAC_5_30) || - (osi_core->mac == OSI_MAC_HW_MGBE && osi_core->mac_ver == OSI_MGBE_MAC_3_10)) { - pdata->macsec_pdata->enabled_before_suspend = - pdata->macsec_pdata->enabled; - if (pdata->macsec_pdata->enabled != OSI_DISABLE) { - ret = macsec_suspend(pdata->macsec_pdata); - if (ret < 0) - dev_err(pdata->dev, "Failed to suspend macsec"); - } - } -#endif /* MACSEC_SUPPORT */ - - /* Since MAC is placed in reset during suspend, take a backup of - * current configuration so that SW view of HW is maintained across - * suspend/resume. 
- */ - ioctl_data.cmd = OSI_CMD_SAVE_REGISTER; - if (osi_handle_ioctl(osi_core, &ioctl_data)) { - dev_err(dev, "Failed to backup MAC core registers\n"); - return -EBUSY; - } - - /* stop workqueue */ - cancel_delayed_work_sync(&pdata->tx_ts_work); - - /* Stop workqueue while DUT is going to suspend state */ - ether_stats_work_queue_stop(pdata); -#ifdef HSI_SUPPORT - cancel_delayed_work_sync(&pdata->ether_hsi_work); -#endif - if (pdata->phydev && !(device_may_wakeup(&ndev->dev))) { - phy_stop(pdata->phydev); - if (gpio_is_valid(pdata->phy_reset)) - gpio_set_value(pdata->phy_reset, 0); - } - - netif_tx_disable(ndev); - ether_napi_disable(pdata); - - ret = ether_update_mac_addr_filter(pdata, &ioctl_data, OSI_DISABLE, - ETHER_ADDRESS_MAC); - if (ret < 0) { - dev_err(pdata->dev, "issue in deleting MAC address\n"); - } - - ret = ether_update_mac_addr_filter(pdata, &ioctl_data, OSI_DISABLE, - ETHER_ADDRESS_BC); - if (ret < 0) { - dev_err(pdata->dev, "issue in deleting BC address\n"); - } - - osi_hw_dma_deinit(osi_dma); - osi_hw_core_deinit(osi_core); - - for (i = 0; i < osi_dma->num_dma_chans; i++) { - chan = osi_dma->dma_chans[i]; - osi_handle_dma_intr(osi_dma, chan, - OSI_DMA_CH_TX_INTR, - OSI_DMA_INTR_DISABLE); - osi_handle_dma_intr(osi_dma, chan, - OSI_DMA_CH_RX_INTR, - OSI_DMA_INTR_DISABLE); - } - - free_dma_resources(pdata); - - if (osi_core->mac == OSI_MAC_HW_MGBE) - pm_runtime_put_sync(pdata->dev); - - return 0; -} - /** * @brief Ethernet platform driver resume call. 
* @@ -6872,29 +6704,10 @@ static int ether_resume(struct ether_priv_data *pdata) return ret; } - /* initialize mac/mtl/dma common registers */ - ret = osi_hw_core_init(osi_core, - pdata->hw_feat.tx_fifo_size, - pdata->hw_feat.rx_fifo_size); - if (ret < 0) { - dev_err(dev, - "%s: failed to initialize mac hw core with reason %d\n", - __func__, ret); - goto err_core; - } - - ret = ether_update_mac_addr_filter(pdata, &ioctl_data, OSI_ENABLE, - ETHER_ADDRESS_MAC); - if (ret < 0) { - dev_err(pdata->dev, "failed to set MAC address\n"); - goto err_dma; - } - - ret = ether_update_mac_addr_filter(pdata, &ioctl_data, OSI_ENABLE, - ETHER_ADDRESS_BC); - if (ret < 0) { - dev_err(pdata->dev, "failed to set BC address\n"); - goto err_dma; + ioctl_data.cmd = OSI_CMD_RESUME; + if (osi_handle_ioctl(osi_core, &ioctl_data)) { + dev_err(dev, "Failed to perform OSI resume\n"); + goto err_resume; } /* dma init */ @@ -6908,14 +6721,6 @@ static int ether_resume(struct ether_priv_data *pdata) /* enable NAPI */ ether_napi_enable(pdata); - /* start the mac */ - ioctl_data.cmd = OSI_CMD_START_MAC; - ret = osi_handle_ioctl(osi_core, &ioctl_data); - if (ret < 0) { - dev_err(dev, - "%s: failed to start MAC %d\n", __func__, ret); - goto err_start_mac; - } if (pdata->phydev && !(device_may_wakeup(&ndev->dev))) { /* configure phy init */ @@ -6935,7 +6740,7 @@ static int ether_resume(struct ether_priv_data *pdata) #ifdef MACSEC_SUPPORT if ((osi_core->mac == OSI_MAC_HW_EQOS && osi_core->mac_ver == OSI_EQOS_MAC_5_30) || (osi_core->mac == OSI_MAC_HW_MGBE && osi_core->mac_ver == OSI_MGBE_MAC_3_10)) { - if (pdata->macsec_pdata->enabled_before_suspend != OSI_DISABLE) { + if (pdata->macsec_pdata->next_supp_idx != OSI_DISABLE) { ret = macsec_resume(pdata->macsec_pdata); if (ret < 0) dev_err(pdata->dev, "Failed to resume MACSEC "); @@ -6944,16 +6749,102 @@ static int ether_resume(struct ether_priv_data *pdata) #endif /* MACSEC_SUPPORT */ return 0; -err_start_mac: - ether_napi_disable(pdata); + err_dma: + 
ether_napi_disable(pdata); osi_hw_core_deinit(osi_core); -err_core: +err_resume: free_dma_resources(pdata); return ret; } +/** + * @brief Ethernet platform driver suspend noirq callback. + * + * Alogorithm: Stops all data queues and PHY if the device + * does not wake capable. Disable TX and NAPI. + * Deinit OSI core, DMA and TX/RX interrupts. + * + * @param[in] dev: Platform device associated with platform driver. + * + * @retval 0 on success + * @retval "negative value" on failure. + */ +static int ether_suspend_noirq(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct ether_priv_data *pdata = netdev_priv(ndev); + struct osi_core_priv_data *osi_core = pdata->osi_core; + struct osi_dma_priv_data *osi_dma = pdata->osi_dma; + struct osi_ioctl ioctl_data = {}; + unsigned int i = 0, chan = 0; + int ret; + + if (!netif_running(ndev)) + return 0; + + /* Keep MACSEC to suspend if MACSEC is supported on this platform */ +#ifdef MACSEC_SUPPORT + if ((osi_core->mac == OSI_MAC_HW_EQOS && osi_core->mac_ver == OSI_EQOS_MAC_5_30) || + (osi_core->mac == OSI_MAC_HW_MGBE && osi_core->mac_ver == OSI_MGBE_MAC_3_10)) { + if (pdata->macsec_pdata->next_supp_idx != OSI_DISABLE) { + ret = macsec_suspend(pdata->macsec_pdata); + if (ret < 0) + dev_err(pdata->dev, "Failed to suspend macsec"); + } + } +#endif /* MACSEC_SUPPORT */ + + tasklet_kill(&pdata->lane_restart_task); + + /* stop workqueue */ + cancel_delayed_work_sync(&pdata->tx_ts_work); + + /* Stop workqueue while DUT is going to suspend state */ + ether_stats_work_queue_stop(pdata); +#ifdef HSI_SUPPORT + cancel_delayed_work_sync(&pdata->ether_hsi_work); +#endif + if (pdata->phydev && !(device_may_wakeup(&ndev->dev))) { + phy_stop(pdata->phydev); + if (gpio_is_valid(pdata->phy_reset)) + gpio_set_value(pdata->phy_reset, 0); + } + + netif_tx_disable(ndev); + ether_napi_disable(pdata); + + osi_hw_dma_deinit(osi_dma); + + ioctl_data.cmd = OSI_CMD_SUSPEND; + if (osi_handle_ioctl(osi_core, &ioctl_data)) { + 
dev_err(dev, "Failed to perform OSI core suspend\n"); + if (ether_resume(pdata) < 0) { + dev_err(dev, "Failed to perform resume on suspend fail\n"); + } + return -EBUSY; + } + + for (i = 0; i < osi_dma->num_dma_chans; i++) { + chan = osi_dma->dma_chans[i]; + osi_handle_dma_intr(osi_dma, chan, + OSI_DMA_CH_TX_INTR, + OSI_DMA_INTR_DISABLE); + osi_handle_dma_intr(osi_dma, chan, + OSI_DMA_CH_RX_INTR, + OSI_DMA_INTR_DISABLE); + } + + free_dma_resources(pdata); + + if (osi_core->mac == OSI_MAC_HW_MGBE) + pm_runtime_put_sync(pdata->dev); + + return 0; +} + + /** * @brief Ethernet platform driver resume noirq callback. * @@ -6969,8 +6860,6 @@ static int ether_resume_noirq(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct ether_priv_data *pdata = netdev_priv(ndev); - struct osi_core_priv_data *osi_core = pdata->osi_core; - struct osi_ioctl ioctl_data = {}; int ret = 0; if (!netif_running(ndev)) @@ -6989,18 +6878,6 @@ static int ether_resume_noirq(struct device *dev) return ret; } - /* Since MAC is brought of reset, all the SW configuration done before - * suspend/resume will be overwritten by power-on-default values. - * Restore the backup of the MAC configuration to maintain consistency - * between SW/HW state. - */ - ioctl_data.cmd = OSI_CMD_RESTORE_REGISTER; - if (osi_handle_ioctl(osi_core, &ioctl_data)) { - //TODO: Ideally, undo MAC init/resume & return. 
- dev_err(dev, "Failed to restore MAC core registers\n"); - return -EIO; - } - return 0; } @@ -7021,6 +6898,8 @@ static const struct dev_pm_ops ether_pm_ops = { static const struct of_device_id ether_of_match[] = { { .compatible = "nvidia,nveqos" }, { .compatible = "nvidia,nvmgbe" }, + { .compatible = "nvidia,tegra234-mgbe" }, + { .compatible = "nvidia,tegra234-eqos" }, {}, }; MODULE_DEVICE_TABLE(of, ether_of_match); diff --git a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h index 1cee0dea33..9dca48b80b 100644 --- a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h +++ b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -206,21 +206,9 @@ #define ETHER_TX_MAX_FRAME_SIZE GSO_MAX_SIZE /** - * @brief IVC wait timeout. + * @brief IVC wait timeout cnt in micro seconds. */ -#define IVC_WAIT_TIMEOUT (msecs_to_jiffies(100)) - -/** - * @brief IVC read timeout cnt. - * used as 20*IVC_WAIT_TIMEOUT hence Max is 2 sec timeout. - */ -#define IVC_READ_TIMEOUT_CNT 20 - -/** - * @brief IVC channel timeout. - * Used with 1 millisec so max timeout is 50 ms. 
- */ -#define IVC_CHANNEL_TIMEOUT_CNT 50 +#define IVC_WAIT_TIMEOUT_CNT 200000 /** * @brief Broadcast and MAC address macros @@ -357,11 +345,7 @@ struct ether_ivc_ctxt { /** ivc cookie */ struct tegra_hv_ivc_cookie *ivck; /** ivc lock */ - struct mutex ivck_lock; - /** ivc work */ - struct work_struct ivc_work; - /** wait for event */ - struct completion msg_complete; + raw_spinlock_t ivck_lock; /** Flag to indicate ivc started or stopped */ unsigned int ivc_state; }; @@ -392,6 +376,24 @@ struct ether_tx_ts_skb_list { unsigned long pkt_jiffies; }; +/** + * @brief ether_xtra_stat_counters - OSI core extra stat counters + */ +struct ether_xtra_stat_counters { + /** rx skb allocation failure count */ + nveu64_t re_alloc_rxbuf_failed[OSI_MGBE_MAX_NUM_QUEUES]; + /** TX per channel interrupt count */ + nveu64_t tx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; + /** TX per channel SW timer callback count */ + nveu64_t tx_usecs_swtimer_n[OSI_MGBE_MAX_NUM_QUEUES]; + /** RX per channel interrupt count */ + nveu64_t rx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES]; + /** link connect count */ + nveu64_t link_connect_count; + /** link disconnect count */ + nveu64_t link_disconnect_count; +}; + /** * @brief Ethernet driver private data */ @@ -525,10 +527,6 @@ struct ether_priv_data { unsigned int promisc_mode; /** Delayed work queue to read RMON counters periodically */ struct delayed_work ether_stats_work; - /** process rx work */ - struct work_struct set_rx_mode_work; - /** rx lock */ - struct mutex rx_mode_lock; /** set speed work */ struct delayed_work set_speed_work; /** Flag to check if EEE LPI is enabled for the MAC */ @@ -623,6 +621,16 @@ struct ether_priv_data { raw_spinlock_t txts_lock; /** Ref count for ether_get_tx_ts_func */ atomic_t tx_ts_ref_cnt; + /** Ref count for set_speed_work_func */ + atomic_t set_speed_ref_cnt; + /** flag to enable logs using ethtool */ + u32 msg_enable; + /** flag to indicate to start/stop the Tx */ + unsigned int tx_start_stop; + /** Tasklet for 
restarting UPHY lanes */ + struct tasklet_struct lane_restart_task; + /** xtra sw error counters */ + struct ether_xtra_stat_counters xstats; }; /** @@ -809,6 +817,7 @@ int ether_tc_setup_cbs(struct ether_priv_data *pdata, * @retval EAGAIN on Failure */ int ether_get_tx_ts(struct ether_priv_data *pdata); +void ether_restart_lane_bringup_task(struct tasklet_struct *t); #ifdef ETHER_NVGRO void ether_nvgro_purge_timer(struct timer_list *t); #endif /* ETHER_NVGRO */ diff --git a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_tc.c b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_tc.c index 4478a262e5..1d0a1695d2 100644 --- a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_tc.c +++ b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ether_tc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -228,6 +228,9 @@ int ether_tc_setup_cbs(struct ether_priv_data *pdata, ioctl_data.avb.oper_mode = OSI_MTL_QUEUE_AVB; ioctl_data.avb.credit_control = OSI_ENABLE; } else { + /* For EQOS harware library code use internally SP(0) and + For MGBE harware library code use internally ETS(2) if + algo != CBS. */ ioctl_data.avb.algo = OSI_MTL_TXQ_AVALG_SP; ioctl_data.avb.oper_mode = OSI_MTL_QUEUE_ENABLE; ioctl_data.avb.credit_control = OSI_DISABLE; diff --git a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ethtool.c b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ethtool.c index d5c9c6e362..c1c4308803 100644 --- a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ethtool.c +++ b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ethtool.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. 
All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -82,16 +82,6 @@ static const struct ether_stats ether_frpstrings_stats[] = { offsetof(struct osi_dma_priv_data, pkt_err_stats.y)} #endif -#if KERNEL_VERSION(5, 5, 0) > LINUX_VERSION_CODE -#define ETHER_CORE_PKT_ERR_STAT(z) \ -{ (#z), FIELD_SIZEOF(struct osi_core_pkt_err_stats, z), \ - offsetof(struct osi_core_priv_data, pkt_err_stats.z)} -#else -#define ETHER_CORE_PKT_ERR_STAT(z) \ -{ (#z), sizeof_field(struct osi_core_pkt_err_stats, z), \ - offsetof(struct osi_core_priv_data, pkt_err_stats.z)} -#endif - /** * @brief ETHER pkt_err statistics */ @@ -185,12 +175,12 @@ static const struct ether_stats ether_dstrings_stats[] = { */ #if KERNEL_VERSION(5, 5, 0) > LINUX_VERSION_CODE #define ETHER_EXTRA_STAT(b) \ -{ #b, FIELD_SIZEOF(struct osi_xtra_stat_counters, b), \ - offsetof(struct osi_core_priv_data, xstats.b)} +{ #b, FIELD_SIZEOF(struct ether_xtra_stat_counters, b), \ + offsetof(struct ether_priv_data, xstats.b)} #else #define ETHER_EXTRA_STAT(b) \ -{ #b, sizeof_field(struct osi_xtra_stat_counters, b), \ - offsetof(struct osi_core_priv_data, xstats.b)} +{ #b, sizeof_field(struct ether_xtra_stat_counters, b), \ + offsetof(struct ether_priv_data, xstats.b)} #endif /** * @brief Ethernet extra statistics @@ -207,49 +197,6 @@ static const struct ether_stats ether_gstrings_stats[] = { ETHER_EXTRA_STAT(re_alloc_rxbuf_failed[8]), ETHER_EXTRA_STAT(re_alloc_rxbuf_failed[9]), - /* Tx/Rx IRQ error info */ - ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[0]), - ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[1]), - ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[2]), - ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[3]), - ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[4]), - ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[5]), - ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[6]), - ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[7]), - ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[8]), - 
ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[9]), - ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[0]), - ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[1]), - ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[2]), - ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[3]), - ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[4]), - ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[5]), - ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[6]), - ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[7]), - ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[8]), - ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[9]), - ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[0]), - ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[1]), - ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[2]), - ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[3]), - ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[4]), - ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[5]), - ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[6]), - ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[7]), - ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[8]), - ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[9]), - ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[0]), - ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[1]), - ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[2]), - ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[3]), - ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[4]), - ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[5]), - ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[6]), - ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[7]), - ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[8]), - ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[9]), - ETHER_EXTRA_STAT(rx_watchdog_irq_n), - ETHER_EXTRA_STAT(fatal_bus_error_irq_n), /* Tx/Rx IRQ Events */ ETHER_EXTRA_STAT(tx_normal_irq_n[0]), @@ -284,14 +231,6 @@ static const struct ether_stats ether_gstrings_stats[] = { ETHER_EXTRA_STAT(rx_normal_irq_n[9]), ETHER_EXTRA_STAT(link_disconnect_count), ETHER_EXTRA_STAT(link_connect_count), - ETHER_EXTRA_STAT(ts_lock_add_fail), - ETHER_EXTRA_STAT(ts_lock_del_fail), - - /* Packet error stats */ - ETHER_CORE_PKT_ERR_STAT(mgbe_ip_header_err), - ETHER_CORE_PKT_ERR_STAT(mgbe_jabber_timeout_err), - ETHER_CORE_PKT_ERR_STAT(mgbe_payload_cs_err), - 
ETHER_CORE_PKT_ERR_STAT(mgbe_tx_underflow_err), }; /** @@ -493,58 +432,101 @@ static const struct ether_stats ether_mmc[] = { }; /** - * @brief Ethernet extra TSN statistics array length + * @brief Ethernet extra statistics array length */ -#define ETHER_EXTRA_TSN_STAT_LEN OSI_ARRAY_SIZE(ether_tstrings_stats) +#define ETHER_CORE_STAT_LEN OSI_ARRAY_SIZE(ether_tstrings_stats) /** * @brief Name of extra Ethernet stats, with length of name not more than * ETH_GSTRING_LEN MAC */ #if KERNEL_VERSION(5, 5, 0) > LINUX_VERSION_CODE -#define ETHER_MMC_STAT(c) \ -{ #c, FIELD_SIZEOF(struct osi_mmc_counters, c), \ - offsetof(struct osi_core_priv_data, mmc.c)} +#define ETHER_CORE_STATS(r) \ +{ (#r), FIELD_SIZEOF(struct osi_stats, r), \ + offsetof(struct osi_core_priv_data, stats.r)} #else -#define ETHER_MMC_STAT(c) \ -{ #c, sizeof_field(struct osi_mmc_counters, c), \ - offsetof(struct osi_core_priv_data, mmc.c)} +#define ETHER_CORE_STATS(r) \ +{ (#r), sizeof_field(struct osi_stats, r), \ + offsetof(struct osi_core_priv_data, stats.r)} #endif -#if KERNEL_VERSION(5, 5, 0) > LINUX_VERSION_CODE -#define ETHER_TEXTRA_STAT(r) \ -{ (#r), FIELD_SIZEOF(struct osi_tsn_stats, r), \ - offsetof(struct osi_core_priv_data, tsn_stats.r)} -#else -#define ETHER_TEXTRA_STAT(r) \ -{ (#r), sizeof_field(struct osi_tsn_stats, r), \ - offsetof(struct osi_core_priv_data, tsn_stats.r)} -#endif /** * @brief Ethernet extra statistics */ static const struct ether_stats ether_tstrings_stats[] = { - ETHER_TEXTRA_STAT(const_gate_ctr_err), - ETHER_TEXTRA_STAT(head_of_line_blk_sch), - ETHER_TEXTRA_STAT(hlbs_q[0]), - ETHER_TEXTRA_STAT(hlbs_q[1]), - ETHER_TEXTRA_STAT(hlbs_q[2]), - ETHER_TEXTRA_STAT(hlbs_q[3]), - ETHER_TEXTRA_STAT(hlbs_q[4]), - ETHER_TEXTRA_STAT(hlbs_q[5]), - ETHER_TEXTRA_STAT(hlbs_q[6]), - ETHER_TEXTRA_STAT(hlbs_q[7]), - ETHER_TEXTRA_STAT(head_of_line_blk_frm), - ETHER_TEXTRA_STAT(hlbf_q[0]), - ETHER_TEXTRA_STAT(hlbf_q[1]), - ETHER_TEXTRA_STAT(hlbf_q[2]), - ETHER_TEXTRA_STAT(hlbf_q[3]), - 
ETHER_TEXTRA_STAT(hlbf_q[4]), - ETHER_TEXTRA_STAT(hlbf_q[5]), - ETHER_TEXTRA_STAT(hlbf_q[6]), - ETHER_TEXTRA_STAT(hlbf_q[7]), - ETHER_TEXTRA_STAT(base_time_reg_err), - ETHER_TEXTRA_STAT(sw_own_list_complete), + ETHER_CORE_STATS(const_gate_ctr_err), + ETHER_CORE_STATS(head_of_line_blk_sch), + ETHER_CORE_STATS(hlbs_q[0]), + ETHER_CORE_STATS(hlbs_q[1]), + ETHER_CORE_STATS(hlbs_q[2]), + ETHER_CORE_STATS(hlbs_q[3]), + ETHER_CORE_STATS(hlbs_q[4]), + ETHER_CORE_STATS(hlbs_q[5]), + ETHER_CORE_STATS(hlbs_q[6]), + ETHER_CORE_STATS(hlbs_q[7]), + ETHER_CORE_STATS(head_of_line_blk_frm), + ETHER_CORE_STATS(hlbf_q[0]), + ETHER_CORE_STATS(hlbf_q[1]), + ETHER_CORE_STATS(hlbf_q[2]), + ETHER_CORE_STATS(hlbf_q[3]), + ETHER_CORE_STATS(hlbf_q[4]), + ETHER_CORE_STATS(hlbf_q[5]), + ETHER_CORE_STATS(hlbf_q[6]), + ETHER_CORE_STATS(hlbf_q[7]), + ETHER_CORE_STATS(base_time_reg_err), + ETHER_CORE_STATS(sw_own_list_complete), + + /* Tx/Rx IRQ error info */ + ETHER_CORE_STATS(tx_proc_stopped_irq_n[0]), + ETHER_CORE_STATS(tx_proc_stopped_irq_n[1]), + ETHER_CORE_STATS(tx_proc_stopped_irq_n[2]), + ETHER_CORE_STATS(tx_proc_stopped_irq_n[3]), + ETHER_CORE_STATS(tx_proc_stopped_irq_n[4]), + ETHER_CORE_STATS(tx_proc_stopped_irq_n[5]), + ETHER_CORE_STATS(tx_proc_stopped_irq_n[6]), + ETHER_CORE_STATS(tx_proc_stopped_irq_n[7]), + ETHER_CORE_STATS(tx_proc_stopped_irq_n[8]), + ETHER_CORE_STATS(tx_proc_stopped_irq_n[9]), + ETHER_CORE_STATS(rx_proc_stopped_irq_n[0]), + ETHER_CORE_STATS(rx_proc_stopped_irq_n[1]), + ETHER_CORE_STATS(rx_proc_stopped_irq_n[2]), + ETHER_CORE_STATS(rx_proc_stopped_irq_n[3]), + ETHER_CORE_STATS(rx_proc_stopped_irq_n[4]), + ETHER_CORE_STATS(rx_proc_stopped_irq_n[5]), + ETHER_CORE_STATS(rx_proc_stopped_irq_n[6]), + ETHER_CORE_STATS(rx_proc_stopped_irq_n[7]), + ETHER_CORE_STATS(rx_proc_stopped_irq_n[8]), + ETHER_CORE_STATS(rx_proc_stopped_irq_n[9]), + ETHER_CORE_STATS(tx_buf_unavail_irq_n[0]), + ETHER_CORE_STATS(tx_buf_unavail_irq_n[1]), + ETHER_CORE_STATS(tx_buf_unavail_irq_n[2]), + 
ETHER_CORE_STATS(tx_buf_unavail_irq_n[3]), + ETHER_CORE_STATS(tx_buf_unavail_irq_n[4]), + ETHER_CORE_STATS(tx_buf_unavail_irq_n[5]), + ETHER_CORE_STATS(tx_buf_unavail_irq_n[6]), + ETHER_CORE_STATS(tx_buf_unavail_irq_n[7]), + ETHER_CORE_STATS(tx_buf_unavail_irq_n[8]), + ETHER_CORE_STATS(tx_buf_unavail_irq_n[9]), + ETHER_CORE_STATS(rx_buf_unavail_irq_n[0]), + ETHER_CORE_STATS(rx_buf_unavail_irq_n[1]), + ETHER_CORE_STATS(rx_buf_unavail_irq_n[2]), + ETHER_CORE_STATS(rx_buf_unavail_irq_n[3]), + ETHER_CORE_STATS(rx_buf_unavail_irq_n[4]), + ETHER_CORE_STATS(rx_buf_unavail_irq_n[5]), + ETHER_CORE_STATS(rx_buf_unavail_irq_n[6]), + ETHER_CORE_STATS(rx_buf_unavail_irq_n[7]), + ETHER_CORE_STATS(rx_buf_unavail_irq_n[8]), + ETHER_CORE_STATS(rx_buf_unavail_irq_n[9]), + ETHER_CORE_STATS(rx_watchdog_irq_n), + ETHER_CORE_STATS(fatal_bus_error_irq_n), + ETHER_CORE_STATS(ts_lock_add_fail), + ETHER_CORE_STATS(ts_lock_del_fail), + + /* Packet error stats */ + ETHER_CORE_STATS(mgbe_ip_header_err), + ETHER_CORE_STATS(mgbe_jabber_timeout_err), + ETHER_CORE_STATS(mgbe_payload_cs_err), + ETHER_CORE_STATS(mgbe_tx_underflow_err), }; /** @@ -583,6 +565,16 @@ static void ether_get_ethtool_stats(struct net_device *dev, return; } + if (osi_core->use_virtualization == OSI_ENABLE) { + ioctl_data.cmd = OSI_CMD_READ_STATS; + ret = osi_handle_ioctl(osi_core, &ioctl_data); + if (ret == -1) { + dev_err(pdata->dev, + "Fail to read core stats\n"); + return; + } + } + for (i = 0; i < ETHER_MMC_STATS_LEN; i++) { char *p = (char *)osi_core + ether_mmc[i].stat_offset; @@ -592,7 +584,7 @@ static void ether_get_ethtool_stats(struct net_device *dev, } for (i = 0; i < ETHER_EXTRA_STAT_LEN; i++) { - char *p = (char *)osi_core + + char *p = (char *)pdata + ether_gstrings_stats[i].stat_offset; data[j++] = (ether_gstrings_stats[i].sizeof_stat == @@ -615,8 +607,7 @@ static void ether_get_ethtool_stats(struct net_device *dev, sizeof(u64)) ? 
(*(u64 *)p) : (*(u32 *)p); } - for (i = 0; ((i < ETHER_EXTRA_TSN_STAT_LEN) && - (pdata->hw_feat.est_sel == OSI_ENABLE)); i++) { + for (i = 0; i < ETHER_CORE_STAT_LEN; i++) { char *p = (char *)osi_core + ether_tstrings_stats[i].stat_offset; @@ -626,7 +617,7 @@ static void ether_get_ethtool_stats(struct net_device *dev, for (i = 0; ((i < ETHER_FRP_STAT_LEN) && (pdata->hw_feat.frp_sel == OSI_ENABLE)); i++) { - char *p = (char *)osi_core + + char *p = (char *)osi_dma + ether_frpstrings_stats[i].stat_offset; data[j++] = (ether_frpstrings_stats[i].sizeof_stat == @@ -675,12 +666,10 @@ static int ether_get_sset_count(struct net_device *dev, int sset) } else { len += ETHER_PKT_ERR_STAT_LEN; } - if (INT_MAX - ETHER_EXTRA_TSN_STAT_LEN < len) { + if (INT_MAX - ETHER_CORE_STAT_LEN < len) { /* do nothing */ } else { - if (pdata->hw_feat.est_sel == OSI_ENABLE) { - len += ETHER_EXTRA_TSN_STAT_LEN; - } + len += ETHER_CORE_STAT_LEN; } if (INT_MAX - ETHER_FRP_STAT_LEN < len) { /* do nothing */ @@ -752,9 +741,7 @@ static void ether_get_strings(struct net_device *dev, u32 stringset, u8 *data) } p += ETH_GSTRING_LEN; } - for (i = 0; ((i < ETHER_EXTRA_TSN_STAT_LEN) && - (pdata->hw_feat.est_sel == OSI_ENABLE)); - i++) { + for (i = 0; i < ETHER_CORE_STAT_LEN; i++) { str = (u8 *)ether_tstrings_stats[i].stat_string; if (memcpy(p, str, ETH_GSTRING_LEN) == OSI_NULL) { @@ -1603,6 +1590,20 @@ static int ether_set_ringparam(struct net_device *ndev, return ret; } +static unsigned int ether_get_msglevel(struct net_device *ndev) +{ + struct ether_priv_data *pdata = netdev_priv(ndev); + + return pdata->msg_enable; +} + +static void ether_set_msglevel(struct net_device *ndev, u32 level) +{ + struct ether_priv_data *pdata = netdev_priv(ndev); + + pdata->msg_enable = level; +} + /** * @brief Set of ethtool operations */ @@ -1634,6 +1635,8 @@ static const struct ethtool_ops ether_ethtool_ops = { .set_rxfh = ether_set_rxfh, .get_ringparam = ether_get_ringparam, .set_ringparam = ether_set_ringparam, + 
.get_msglevel = ether_get_msglevel, + .set_msglevel = ether_set_msglevel, }; void ether_set_ethtool_ops(struct net_device *ndev) diff --git a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ioctl.c b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ioctl.c index 8a04aa67cd..12b2b85ccd 100644 --- a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ioctl.c +++ b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ioctl.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -118,7 +118,7 @@ static bool ether_is_bc_addr(unsigned char *bc_addr) * @retval "nagative value" on Failure */ static int ether_set_avb_algo(struct net_device *ndev, - struct ether_ifr_data *ifdata) + struct ether_exported_ifr_data *ifdata) { struct ether_priv_data *pdata = netdev_priv(ndev); struct osi_core_priv_data *osi_core = pdata->osi_core; @@ -174,7 +174,7 @@ static int ether_set_avb_algo(struct net_device *ndev, * @retval "nagative value" on Failure */ static int ether_m2m_tsync(struct net_device *ndev, - struct ether_ifr_data *ifdata) + struct ether_exported_ifr_data *ifdata) { struct ether_priv_data *pdata = netdev_priv(ndev); struct osi_core_priv_data *osi_core = pdata->osi_core; @@ -214,7 +214,7 @@ static int ether_m2m_tsync(struct net_device *ndev, * @retval "negative value" on Failure */ static int ether_get_tsc_ptp_cap(struct net_device *ndev, - struct ether_ifr_data *ifdata) + struct ether_exported_ifr_data *ifdata) { struct ether_priv_data *pdata = netdev_priv(ndev); struct osi_core_priv_data *osi_core = pdata->osi_core; @@ -266,7 +266,7 @@ static int ether_get_tsc_ptp_cap(struct net_device *ndev, * @retval "negative value" on Failure */ static int ether_get_avb_algo(struct net_device *ndev, - struct ether_ifr_data 
*ifdata) + struct ether_exported_ifr_data *ifdata) { struct ether_priv_data *pdata = netdev_priv(ndev); struct osi_core_priv_data *osi_core = pdata->osi_core; @@ -315,7 +315,7 @@ static int ether_get_avb_algo(struct net_device *ndev, * @retval "negative value" on Failure */ static int ether_config_ptp_offload(struct ether_priv_data *pdata, - struct ether_ifr_data *ifrd_p) + struct ether_exported_ifr_data *ifrd_p) { int ret = -EINVAL; struct ptp_offload_param param; @@ -415,7 +415,7 @@ static int ether_config_ptp_offload(struct ether_priv_data *pdata, * @retval "negative value" on Failure */ static int ether_config_arp_offload(struct ether_priv_data *pdata, - struct ether_ifr_data *ifrd_p) + struct ether_exported_ifr_data *ifrd_p) { int ret = -EINVAL; struct arp_offload_param param; @@ -465,7 +465,7 @@ static int ether_config_arp_offload(struct ether_priv_data *pdata, * @retval "negative value" on Failure */ static int ether_config_frp_cmd(struct net_device *dev, - struct ether_ifr_data *ifdata) + struct ether_exported_ifr_data *ifdata) { struct ether_priv_data *pdata = netdev_priv(dev); struct osi_core_priv_data *osi_core = pdata->osi_core; @@ -503,8 +503,7 @@ static int ether_config_frp_cmd(struct net_device *dev, * 2) OSI call to update register * * @param[in] dev: pointer to net device structure. - * @param[in] filter_flags: flag to indicate whether L3/L4 filtering to be - * enabled/disabled. + * @param[in] ifdata: pointer to IOCTL specific structure. * * @note MAC and PHY need to be initialized. 
* @@ -513,43 +512,12 @@ static int ether_config_frp_cmd(struct net_device *dev, * */ static int ether_config_l3_l4_filtering(struct net_device *dev, - unsigned int filter_flags) -{ - struct ether_priv_data *pdata = netdev_priv(dev); - - dev_err(pdata->dev, "%s: This ioctl is deprecated, directly set the filter using ioctl command EQOS_IPV4/IPV6/TCP/UDP_FILTERING_CMD instead\n", - __func__); - return -1; -} - -/** - * @brief This function is invoked by ioctl function when user issues an ioctl - * command to configure L3(IPv4) filtering. - * - * Algorithm: - * 1) Layer 3 and Layer 4 Filter Enable, if already not. - * 2) Enable/disable IPv4 filtering. - * 3) Select source/destination address matching. - * 4) Select perfect/inverse matching. - * 5) Update the IPv4 address into MAC register. - * - * @param[in] dev: Pointer to net device structure. - * @param[in] ifdata: pointer to IOCTL specific structure. - * - * @note MAC and PHY need to be initialized. - * - * @retval 0 on Success - * @retval "negative value" on Failure - */ -static int ether_config_ip4_filters(struct net_device *dev, - struct ether_ifr_data *ifdata) + struct ether_exported_ifr_data *ifdata) { struct ether_priv_data *pdata = netdev_priv(dev); struct osi_core_priv_data *osi_core = pdata->osi_core; - struct osi_l3_l4_filter *u_l3_filter = - (struct osi_l3_l4_filter *)ifdata->ptr; - struct osi_ioctl ioctl_data = {}; - unsigned int is_l4_filter = OSI_DISABLE; + struct osi_l3_l4_filter *u_l3_filter; + struct osi_ioctl ioctl_data = { 0 }; int ret = -EINVAL; if (pdata->hw_feat.l3l4_filter_num == OSI_DISABLE) { @@ -557,67 +525,51 @@ static int ether_config_ip4_filters(struct net_device *dev, return ret; } - if (ifdata->ptr == NULL) { + if (!ifdata->ptr) { dev_err(pdata->dev, "%s: Invalid data for priv ioctl %d\n", __func__, ifdata->ifcmd); return ret; } + u_l3_filter = (struct osi_l3_l4_filter *)ifdata->ptr; + if (copy_from_user(&ioctl_data.l3l4_filter, (void __user *)u_l3_filter, sizeof(struct 
osi_l3_l4_filter)) != 0U) { dev_err(pdata->dev, "%s copy from user failed\n", __func__); return -EFAULT; } - if (ioctl_data.l3l4_filter.filter_no > - (pdata->hw_feat.l3l4_filter_num - 1U)) { - dev_err(pdata->dev, "%d filter is not supported in the HW\n", - ioctl_data.l3l4_filter.filter_no); - return ret; - } - ioctl_data.cmd = OSI_CMD_L3L4_FILTER; - ioctl_data.arg1_u32 = OSI_IP4_FILTER; - ioctl_data.arg2_u32 = OSI_DISABLE; - ioctl_data.arg3_u32 = OSI_CHAN_ANY; - ioctl_data.arg4_u32 = is_l4_filter; - return osi_handle_ioctl(osi_core, &ioctl_data); } /** - * @brief This function is invoked by ioctl when user issues an ioctl command - * to configure L3 (IPv6) filtering. + * @brief This function is invoked by ioctl function when user issues an ioctl + * command to configure L2 filtering. * * Algorithm: - * 1) Enable/disable IPv6 filtering. + * 1) Return error if Ethrmet virtualization is not enabled. * 2) Select source/destination address matching. * 3) Select perfect/inverse matching. - * 4) Update the IPv6 address into MAC register. + * 4) Update the L2 MAC address into MAC register. * - * @param[in] dev: net device structure instance. - * @param[in] ifdata: IOCTL specific structure instance. + * @param[in] dev: Pointer to net device structure. + * @param[in] ifdata: pointer to IOCTL specific structure. * - * @note MAC and PHY need to be initialized. + * @note MAC and PHY need to be initialized. 
Only * * @retval 0 on Success * @retval "negative value" on Failure */ -static int ether_config_ip6_filters(struct net_device *dev, - struct ether_ifr_data *ifdata) +static int ether_config_l2_filters(struct net_device *dev, + struct ether_exported_ifr_data *ifdata) { struct ether_priv_data *pdata = netdev_priv(dev); struct osi_core_priv_data *osi_core = pdata->osi_core; - struct osi_l3_l4_filter *u_l3_filter = - (struct osi_l3_l4_filter *)ifdata->ptr; + struct osi_dma_priv_data *osi_dma = pdata->osi_dma; + struct ether_l2_filter u_l2_filter; struct osi_ioctl ioctl_data = {}; - unsigned int is_l4_filter = OSI_DISABLE; - int ret = -EINVAL; - - if (pdata->hw_feat.l3l4_filter_num == OSI_DISABLE) { - dev_err(pdata->dev, "ip6 filter is not supported in the HW\n"); - return ret; - } + int ret = -1; if (ifdata->ptr == NULL) { dev_err(pdata->dev, "%s: Invalid data for priv ioctl %d\n", @@ -625,91 +577,36 @@ static int ether_config_ip6_filters(struct net_device *dev, return ret; } - if (copy_from_user(&ioctl_data.l3l4_filter, (void __user *)u_l3_filter, - sizeof(struct osi_l3_l4_filter)) != 0U) { - dev_err(pdata->dev, "%s copy from user failed\n", __func__); - return -EFAULT; - } - - if (ioctl_data.l3l4_filter.filter_no > - (pdata->hw_feat.l3l4_filter_num - 1U)) { - dev_err(pdata->dev, "%d filter is not supported in the HW\n", - ioctl_data.l3l4_filter.filter_no); + if (osi_core->use_virtualization == OSI_DISABLE) { + dev_err(pdata->dev, "%s Ethernet virualization is not enabled\n", __func__); return ret; } - - ioctl_data.cmd = OSI_CMD_L3L4_FILTER; - ioctl_data.arg1_u32 = OSI_IP6_FILTER; - ioctl_data.arg2_u32 = OSI_DISABLE; - ioctl_data.arg3_u32 = OSI_CHAN_ANY; - ioctl_data.arg4_u32 = is_l4_filter; - - return osi_handle_ioctl(osi_core, &ioctl_data); -} - -/** - * @brief This function is invoked by ioctl function when user issues an ioctl - * command to configure L4(TCP/UDP) filtering. - * - * Algorithm: - * 1) Enable/disable L4 filtering. - * 2) Select TCP/UDP filtering. 
- * 3) Select source/destination port matching. - * 4) select perfect/inverse matching. - * 5) Update the port number into MAC register. - * - * @param[in] dev: pointer to net device structure. - * @param[in] ifdata: pointer to IOCTL specific structure. - * @param[in] tcp_udp: flag to indicate TCP/UDP filtering. - * - * @note MAC and PHY need to be initialized. - * - * @retval 0 on Success - * @retval "negative value" on Failure - */ -static int ether_config_tcp_udp_filters(struct net_device *dev, - struct ether_ifr_data *ifdata, - unsigned int tcp_udp) -{ - struct ether_priv_data *pdata = netdev_priv(dev); - struct osi_core_priv_data *osi_core = pdata->osi_core; - struct osi_l3_l4_filter *u_l4_filter = - (struct osi_l3_l4_filter *)ifdata->ptr; - struct osi_ioctl ioctl_data = {}; - unsigned int is_l4_filter = OSI_ENABLE; - int ret = -EINVAL; - - if (ifdata->ptr == NULL) { - dev_err(pdata->dev, "%s: Invalid data for priv ioctl %d\n", - __func__, ifdata->ifcmd); + if (copy_from_user(&u_l2_filter, (void __user *)ifdata->ptr, + sizeof(struct ether_l2_filter)) != 0U) { + dev_err(pdata->dev, "%s copy from user failed\n", __func__); return ret; } - if (pdata->hw_feat.l3l4_filter_num == OSI_DISABLE) { - dev_err(pdata->dev, - "L4 is not supported in the HW\n"); - return ret; - } + ioctl_data.l2_filter.index = u_l2_filter.index; + ioctl_data.l2_filter.src_dest = OSI_DA_MATCH; - if (copy_from_user(&ioctl_data.l3l4_filter, (void __user *)u_l4_filter, - sizeof(struct osi_l3_l4_filter)) != 0U) { - dev_err(pdata->dev, "%s copy from user failed", __func__); - return -EFAULT; - } + ioctl_data.l2_filter.oper_mode = (OSI_OPER_EN_PERFECT | + OSI_OPER_DIS_PROMISC | + OSI_OPER_DIS_ALLMULTI); - if (ioctl_data.l3l4_filter.filter_no > - (pdata->hw_feat.l3l4_filter_num - 1U)) { - dev_err(pdata->dev, "%d filter is not supported in the HW\n", - ioctl_data.l3l4_filter.filter_no); - return ret; + if (u_l2_filter.en_dis == OSI_ENABLE) { + ioctl_data.l2_filter.oper_mode |= OSI_OPER_ADDR_UPDATE; + 
} else { + ioctl_data.l2_filter.oper_mode |= OSI_OPER_ADDR_DEL; } - ioctl_data.cmd = OSI_CMD_L3L4_FILTER; - ioctl_data.arg1_u32 = tcp_udp; - ioctl_data.arg2_u32 = OSI_DISABLE; - ioctl_data.arg3_u32 = OSI_CHAN_ANY; - ioctl_data.arg4_u32 = is_l4_filter; - + memcpy(ioctl_data.l2_filter.mac_address, + u_l2_filter.mac_address, ETH_ALEN); + ioctl_data.l2_filter.dma_routing = OSI_ENABLE; + ioctl_data.l2_filter.addr_mask = OSI_DISABLE; + ioctl_data.l2_filter.dma_chan = osi_dma->dma_chans[0]; + ioctl_data.l2_filter.dma_chansel = OSI_BIT(osi_dma->dma_chans[0]); + ioctl_data.cmd = OSI_CMD_L2_FILTER; return osi_handle_ioctl(osi_core, &ioctl_data); } @@ -730,7 +627,7 @@ static int ether_config_tcp_udp_filters(struct net_device *dev, * @retval "negative value" on Failure */ static int ether_config_vlan_filter(struct net_device *dev, - struct ether_ifr_data *ifdata) + struct ether_exported_ifr_data *ifdata) { struct ether_priv_data *pdata = netdev_priv(dev); struct osi_core_priv_data *osi_core = pdata->osi_core; @@ -830,7 +727,7 @@ static int ether_config_mc_dmasel(struct net_device *dev, * @retval "negative value" on Failure */ static int ether_config_l2_da_filter(struct net_device *dev, - struct ether_ifr_data *ifdata) + struct ether_exported_ifr_data *ifdata) { struct ether_priv_data *pdata = netdev_priv(dev); struct osi_core_priv_data *osi_core = pdata->osi_core; @@ -880,48 +777,6 @@ static int ether_config_l2_da_filter(struct net_device *dev, return ret; } -/** - * @brief This function is invoked by ioctl when user issues an ioctl command - * to save/restore MAC registers. - * - * Algorithm: Call osi_save_registers and osi_restore_registers - * based on user flags. - * - * @param[in] ndev: network device structure - * @param[in] flags: flags to indicate whether to save and restore MAC registers - * - * @note Ethernet interface need to be up. 
- * - * @retval 0 on Success - * @retval "negative value" on Failure - */ -static int ether_reg_save_restore(struct net_device *ndev, - unsigned int flags) -{ - struct ether_priv_data *pdata = netdev_priv(ndev); - struct osi_core_priv_data *osi_core = pdata->osi_core; - struct osi_ioctl ioctl_data = {}; - - if (flags == OSI_ENABLE) { - ioctl_data.cmd = OSI_CMD_RESTORE_REGISTER; - if (osi_handle_ioctl(osi_core, &ioctl_data)) { - dev_err(pdata->dev, "Restore MAC registers fail\n"); - return -EBUSY; - } - } else if (flags == OSI_DISABLE) { - ioctl_data.cmd = OSI_CMD_SAVE_REGISTER; - if (osi_handle_ioctl(osi_core, &ioctl_data)) { - dev_err(pdata->dev, "Save MAC registers fail\n"); - return -EBUSY; - } - } else { - dev_err(pdata->dev, "Invalid flag values:%d\n", flags); - return -EINVAL; - } - - return 0; -} - /** * @brief This function is invoked by ioctl when user issues an ioctl command * to enable/disable pad calibration at run time. @@ -1079,7 +934,7 @@ static int ether_config_ptp_rxq(struct net_device *ndev, * @retval "negative value" on Failure */ static int ether_config_est(struct net_device *dev, - struct ether_ifr_data *ifdata) + struct ether_exported_ifr_data *ifdata) { struct ether_priv_data *pdata = netdev_priv(dev); struct osi_core_priv_data *osi_core = pdata->osi_core; @@ -1126,7 +981,7 @@ static int ether_config_est(struct net_device *dev, * @retval "negative value" on Failure */ static int ether_config_fpe(struct net_device *dev, - struct ether_ifr_data *ifdata) + struct ether_exported_ifr_data *ifdata) { struct ether_priv_data *pdata = netdev_priv(dev); struct osi_core_priv_data *osi_core = pdata->osi_core; @@ -1142,7 +997,8 @@ static int ether_config_fpe(struct net_device *dev, } if (copy_from_user(&ioctl_data.fpe, (void __user *)u_fpe_cfg, - sizeof(struct osi_fpe_config) != 0U)) { + sizeof(struct osi_fpe_config)) != 0U) { + dev_err(pdata->dev, "%s: copy_from_user error\n", __func__); return -EFAULT; } @@ -1157,6 +1013,44 @@ static int 
ether_config_fpe(struct net_device *dev, return ret; } +#ifdef OSI_DEBUG +/** + * @brief handle ETHER_DEBUG_INTR_CONFIG ioctl + * + * Algorithm: + * - Call OSI_DMA_DEBUG_INTR_CONFIG to enable/disable debug interrupt + * - Call OSI_CMD_DEBUG_INTR_CONFIG to enable/disable debug interrupt + * + * @param[in] ndev: network device structure + * @param[in] ifdata: interface private data structure + * + * @note Ethernet interface need to be up. + * + * @retval 0 on Success + * @retval "nagative value" on Failure + */ +static int ether_debug_intr_config(struct net_device *ndev, + struct ether_exported_ifr_data *ifdata) +{ + struct ether_priv_data *pdata = netdev_priv(ndev); + struct osi_core_priv_data *osi_core = pdata->osi_core; + struct osi_ioctl ioctl_data = {}; + struct osi_dma_priv_data *osi_dma = pdata->osi_dma; + unsigned int enable = ifdata->if_flags; + int ret = -1; + + osi_dma->ioctl_data.cmd = OSI_DMA_IOCTL_CMD_DEBUG_INTR_CONFIG; + osi_dma->ioctl_data.arg_u32 = enable; + ret = osi_dma_ioctl(osi_dma); + if (ret < 0) + return ret; + + ioctl_data.cmd = OSI_CMD_DEBUG_INTR_CONFIG; + ioctl_data.arg1_u32 = enable; + return osi_handle_ioctl(osi_core, &ioctl_data); +} +#endif + /** * @brief ether_priv_ioctl - Handle private IOCTLs * @@ -1178,7 +1072,7 @@ int ether_handle_priv_ioctl(struct net_device *ndev, { struct ether_priv_data *pdata = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; - struct ether_ifr_data ifdata; + struct ether_exported_ifr_data ifdata; struct osi_core_priv_data *osi_core = pdata->osi_core; #ifdef OSI_DEBUG struct osi_dma_priv_data *osi_dma = pdata->osi_dma; @@ -1196,11 +1090,7 @@ int ether_handle_priv_ioctl(struct net_device *ndev, /* Enforce admin permission check */ switch (ifdata.ifcmd) { case ETHER_AVB_ALGORITHM: - case EQOS_L3_L4_FILTER_CMD: - case EQOS_IPV4_FILTERING_CMD: - case EQOS_IPV6_FILTERING_CMD: - case EQOS_UDP_FILTERING_CMD: - case EQOS_TCP_FILTERING_CMD: + case EQOS_L3L4_FILTER_CMD: case EQOS_VLAN_FILTERING_CMD: case 
EQOS_L2_DA_FILTERING_CMD: case ETHER_CONFIG_ARP_OFFLOAD: @@ -1257,13 +1147,10 @@ int ether_handle_priv_ioctl(struct net_device *ndev, ret = -EOPNOTSUPP; } break; - case EQOS_L3_L4_FILTER_CMD: + case EQOS_L3L4_FILTER_CMD: /* flags should be 0x0 or 0x1, discard any other */ - if (pdata->hw_feat.l3l4_filter_num > 0U && - ((ifdata.if_flags == OSI_ENABLE) || - (ifdata.if_flags == OSI_DISABLE))) { - ret = ether_config_l3_l4_filtering(ndev, - ifdata.if_flags); + if (pdata->hw_feat.l3l4_filter_num > 0U) { + ret = ether_config_l3_l4_filtering(ndev, &ifdata); if (ret == 0) { ret = EQOS_CONFIG_SUCCESS; } else { @@ -1277,20 +1164,6 @@ int ether_handle_priv_ioctl(struct net_device *ndev, case ETHER_CONFIG_FRP_CMD: ret = ether_config_frp_cmd(ndev, &ifdata); break; - case EQOS_IPV4_FILTERING_CMD: - ret = ether_config_ip4_filters(ndev, &ifdata); - break; - case EQOS_IPV6_FILTERING_CMD: - ret = ether_config_ip6_filters(ndev, &ifdata); - break; - case EQOS_UDP_FILTERING_CMD: - ret = ether_config_tcp_udp_filters(ndev, &ifdata, - OSI_L4_FILTER_UDP); - break; - case EQOS_TCP_FILTERING_CMD: - ret = ether_config_tcp_udp_filters(ndev, &ifdata, - OSI_L4_FILTER_TCP); - break; case EQOS_VLAN_FILTERING_CMD: ret = ether_config_vlan_filter(ndev, &ifdata); break; @@ -1303,9 +1176,6 @@ int ether_handle_priv_ioctl(struct net_device *ndev, case ETHER_CONFIG_LOOPBACK_MODE: ret = ether_config_loopback_mode(ndev, ifdata.if_flags); break; - case ETHER_SAVE_RESTORE: - ret = ether_reg_save_restore(ndev, ifdata.if_flags); - break; case ETHER_CONFIG_EST: ret = ether_config_est(ndev, &ifdata); break; @@ -1347,6 +1217,9 @@ int ether_handle_priv_ioctl(struct net_device *ndev, ioctl_data.cmd = OSI_CMD_STRUCTS_DUMP; ret = osi_handle_ioctl(pdata->osi_core, &ioctl_data); break; + case ETHER_DEBUG_INTR_CONFIG: + ret = ether_debug_intr_config(ndev, &ifdata); + break; #endif case ETHER_CAP_TSC_PTP: ret = ether_get_tsc_ptp_cap(ndev, &ifdata); @@ -1355,6 +1228,11 @@ int ether_handle_priv_ioctl(struct net_device *ndev, 
case ETHER_M2M_TSYNC: ret = ether_m2m_tsync(ndev, &ifdata); break; + + case ETHER_L2_ADDR: + ret = ether_config_l2_filters(ndev, &ifdata); + break; + default: break; } diff --git a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ioctl.h b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ioctl.h index 4f4f35ba29..79e4ee01e4 100644 --- a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ioctl.h +++ b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ioctl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -17,6 +17,7 @@ #ifndef IOCTL_H #define IOCTL_H +#include "ether_export.h" /** *@addtogroup IOCTL Helper MACROS * @{ @@ -41,20 +42,14 @@ #define ETHER_PRV_TS_IOCTL (SIOCDEVPRIVATE + 1) #define ETHER_PRV_RMDIO_IOCTL (SIOCDEVPRIVATE + 2) #define ETHER_PRV_WMDIO_IOCTL (SIOCDEVPRIVATE + 3) +/* private ioctl number*/ /* TX/RX channel/queue count */ #define EQOS_GET_TX_QCNT 23 #define EQOS_GET_RX_QCNT 24 -/* Line speed */ +/** Line speed */ #define EQOS_GET_CONNECTED_SPEED 25 -/* private ioctl number*/ -#define ETHER_AVB_ALGORITHM 27 /* L3/L4 filter */ -#define EQOS_L3_L4_FILTER_CMD 29 -/* IPv4/6 and TCP/UDP filtering */ -#define EQOS_IPV4_FILTERING_CMD 30 -#define EQOS_IPV6_FILTERING_CMD 31 -#define EQOS_UDP_FILTERING_CMD 32 -#define EQOS_TCP_FILTERING_CMD 33 +#define EQOS_L3L4_FILTER_CMD 29 /* VLAN filtering */ #define EQOS_VLAN_FILTERING_CMD 34 /* L2 DA filtering */ @@ -62,13 +57,7 @@ #define ETHER_CONFIG_ARP_OFFLOAD 36 #define ETHER_CONFIG_LOOPBACK_MODE 40 #define ETHER_CONFIG_PTP_OFFLOAD 42 -#define ETHER_GET_AVB_ALGORITHM 46 -#define ETHER_SAVE_RESTORE 47 #define ETHER_PTP_RXQUEUE 48 -#define ETHER_CONFIG_EST 49 -#define ETHER_CONFIG_FPE 50 -/* FRP Command */ -#define ETHER_CONFIG_FRP_CMD 51 #define 
ETHER_MC_DMA_ROUTE 52 #define ETHER_READ_REG 53 #define ETHER_WRITE_REG 54 @@ -79,36 +68,11 @@ #endif /* OSI_DEBUG */ #define ETHER_CAP_TSC_PTP 58 #define ETHER_M2M_TSYNC 59 - +#ifdef OSI_DEBUG +#define ETHER_DEBUG_INTR_CONFIG 60 +#endif /** @} */ -/** - * @brief struct ether_ifr_data - Private data of struct ifreq - */ -struct ether_ifr_data { - /** Flags used for specific ioctl - like enable/disable */ - unsigned int if_flags; - /** qinx: Queue index to be used for certain ioctls */ - unsigned int qinx; - /** The private ioctl command number */ - unsigned int ifcmd; - /** Used to indicate if context descriptor needs to be setup to - * handle ioctl */ - unsigned int context_setup; - /** Used to query the connected link speed */ - unsigned int connected_speed; - /** Used to set Remote wakeup filters */ - unsigned int rwk_filter_values[EQOS_RWK_FILTER_LENGTH]; - /** Number of remote wakeup filters to use */ - unsigned int rwk_filter_length; - /** The return value of IOCTL handler func */ - int command_error; - /** test_done: Not in use, keep for app compatibility */ - int test_done; - /** IOCTL cmd specific structure pointer */ - void *ptr; -}; - /** * @brief struct arp_offload_param - Parameter to support ARP offload. */ diff --git a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/macsec.c b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/macsec.c index fbf2daa8d8..cd5e10aed4 100644 --- a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/macsec.c +++ b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/macsec.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -22,17 +22,13 @@ #ifdef HSI_SUPPORT #include #endif -/** - * @brief is_nv_macsec_fam_registered - Is nv macsec nl registered - */ -static int is_nv_macsec_fam_registered = OSI_DISABLE; static int macsec_get_tx_next_pn(struct sk_buff *skb, struct genl_info *info); #ifndef MACSEC_KEY_PROGRAM static int macsec_tz_kt_config(struct ether_priv_data *pdata, unsigned char cmd, struct osi_macsec_kt_config *const kt_config, - struct genl_info *const info); + struct genl_info *const info, struct nvpkcs_data *pkcs); #endif static irqreturn_t macsec_s_isr(int irq, void *data) @@ -40,7 +36,7 @@ static irqreturn_t macsec_s_isr(int irq, void *data) struct macsec_priv_data *macsec_pdata = (struct macsec_priv_data *)data; struct ether_priv_data *pdata = macsec_pdata->ether_pdata; - osi_macsec_s_isr(pdata->osi_core); + osi_macsec_isr(pdata->osi_core); return IRQ_HANDLED; } @@ -102,7 +98,7 @@ static irqreturn_t macsec_ns_isr(int irq, void *data) struct ether_priv_data *pdata = macsec_pdata->ether_pdata; int irq_ret = IRQ_HANDLED; - osi_macsec_ns_isr(pdata->osi_core); + osi_macsec_isr(pdata->osi_core); #ifdef HSI_SUPPORT if (pdata->osi_core->hsi.enabled == OSI_ENABLE && @@ -285,10 +281,10 @@ int macsec_open(struct macsec_priv_data *macsec_pdata, goto err_osi_init; } -#ifndef MACSEC_KEY_PROGRAM +#if !defined(MACSEC_KEY_PROGRAM) && !defined(NVPKCS_MACSEC) /* Clear KT entries */ - ret = macsec_tz_kt_config(pdata, OSI_MACSEC_CMD_TZ_KT_RESET, - OSI_NULL, genl_info); + ret = macsec_tz_kt_config(pdata, NV_MACSEC_CMD_TZ_KT_RESET, + OSI_NULL, genl_info, NULL); if (ret < 0) { dev_err(dev, "TZ key config failed %d\n", ret); goto err_osi_en; @@ -334,7 +330,6 @@ int macsec_suspend(struct macsec_priv_data *macsec_pdata) dev_err(dev, "Failed to close macsec\n"); return ret; } - macsec_disable_car(macsec_pdata); return ret; } @@ -347,13 +342,18 @@ int 
macsec_resume(struct macsec_priv_data *macsec_pdata) { struct ether_priv_data *pdata = macsec_pdata->ether_pdata; struct device *dev = pdata->dev; + struct osi_core_priv_data *osi_core = pdata->osi_core; int ret = 0; - ret = macsec_enable_car(macsec_pdata); - if (ret < 0) { - dev_err(dev, "Unable to enable macsec clks & reset\n"); - return ret; + if ((osi_core->use_virtualization == OSI_DISABLE) && + (macsec_pdata->ns_rst)) { + ret = reset_control_reset(macsec_pdata->ns_rst); + if (ret < 0) { + dev_err(dev, "failed to reset macsec\n"); + return ret; + } } + return macsec_open(macsec_pdata, OSI_NULL); } @@ -690,7 +690,8 @@ static int macsec_set_controlled_port(struct sk_buff *skb, } static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa, - struct osi_macsec_sc_info *sc_info) + struct osi_macsec_sc_info *sc_info, + struct nvpkcs_data *pkcs) { if (!attrs[NV_MACSEC_ATTR_SA_CONFIG]) return -EINVAL; @@ -713,11 +714,23 @@ static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa, if (tb_sa[NV_MACSEC_SA_ATTR_LOWEST_PN]) { sc_info->lowest_pn = nla_get_u32(tb_sa[NV_MACSEC_SA_ATTR_LOWEST_PN]); } +#ifdef NVPKCS_MACSEC + if (pkcs) { + if (tb_sa[NV_MACSEC_SA_PKCS_KEY_WRAP]) { + memcpy(pkcs->nv_key, + nla_data(tb_sa[NV_MACSEC_SA_PKCS_KEY_WRAP]), + sizeof(pkcs->nv_key)); + } + if (tb_sa[NV_MACSEC_SA_PKCS_KEK_HANDLE]) { + pkcs->nv_kek = nla_get_u64(tb_sa[NV_MACSEC_SA_PKCS_KEK_HANDLE]); + } + } +#else if (tb_sa[NV_MACSEC_SA_ATTR_KEY]) { memcpy(sc_info->sak, nla_data(tb_sa[NV_MACSEC_SA_ATTR_KEY]), - sizeof(sc_info->sak)); + sizeof(sc_info->sak)); } - +#endif /* NVPKCS_MACSEC */ return 0; } @@ -726,7 +739,7 @@ static int macsec_dis_rx_sa(struct sk_buff *skb, struct genl_info *info) struct nlattr **attrs = info->attrs; struct macsec_priv_data *macsec_pdata; struct ether_priv_data *pdata; - struct osi_macsec_sc_info rx_sa; + struct osi_macsec_sc_info rx_sa = {0}; struct nlattr *tb_sa[NUM_NV_MACSEC_SA_ATTR]; int ret = 0; unsigned short kt_idx; @@ -735,6 
+748,7 @@ static int macsec_dis_rx_sa(struct sk_buff *skb, struct genl_info *info) struct osi_macsec_kt_config kt_config = {0}; struct osi_macsec_table_config *table_config; #endif /* !MACSEC_KEY_PROGRAM */ + struct nvpkcs_data pkcs = {0}; PRINT_ENTRY(); @@ -754,7 +768,7 @@ static int macsec_dis_rx_sa(struct sk_buff *skb, struct genl_info *info) } if (!attrs[NV_MACSEC_ATTR_IFNAME] || - parse_sa_config(attrs, tb_sa, &rx_sa)) { + parse_sa_config(attrs, tb_sa, &rx_sa, &pkcs)) { dev_err(dev, "%s: failed to parse nlattrs", __func__); ret = -EINVAL; goto exit; @@ -770,8 +784,6 @@ static int macsec_dis_rx_sa(struct sk_buff *skb, struct genl_info *info) rx_sa.curr_an, rx_sa.next_pn); dev_info(dev, "\tkey: " KEYSTR, KEY2STR(rx_sa.sak)); - rx_sa.flags = OSI_DISABLE_SA; - mutex_lock(&macsec_pdata->lock); ret = osi_macsec_config(pdata->osi_core, &rx_sa, OSI_DISABLE, OSI_CTLR_SEL_RX, &kt_idx); @@ -787,14 +799,16 @@ static int macsec_dis_rx_sa(struct sk_buff *skb, struct genl_info *info) table_config->rw = OSI_LUT_WRITE; table_config->index = kt_idx; - ret = macsec_tz_kt_config(pdata, OSI_MACSEC_CMD_TZ_CONFIG, &kt_config, - info); + ret = macsec_tz_kt_config(pdata, NV_MACSEC_CMD_TZ_CONFIG, &kt_config, + info, &pkcs); if (ret < 0) { dev_err(dev, "%s: failed to program SAK through TZ %d", __func__, ret); goto exit; } #endif /* !MACSEC_KEY_PROGRAM */ + /* Update the macsec pdata when AN is disabled */ + macsec_pdata->macsec_rx_an_map &= ~((1U) << (rx_sa.curr_an & 0xFU)); exit: PRINT_EXIT(); return ret; @@ -840,7 +854,7 @@ static int macsec_create_rx_sa(struct sk_buff *skb, struct genl_info *info) struct nlattr **attrs = info->attrs; struct macsec_priv_data *macsec_pdata; struct ether_priv_data *pdata; - struct osi_macsec_sc_info rx_sa; + struct osi_macsec_sc_info rx_sa = {0}; struct nlattr *tb_sa[NUM_NV_MACSEC_SA_ATTR]; int ret = 0; unsigned short kt_idx; @@ -850,6 +864,7 @@ static int macsec_create_rx_sa(struct sk_buff *skb, struct genl_info *info) struct osi_macsec_kt_config 
kt_config = {0}; struct osi_macsec_table_config *table_config; #endif /* !MACSEC_KEY_PROGRAM */ + struct nvpkcs_data pkcs = {0}; PRINT_ENTRY(); macsec_pdata = genl_to_macsec_pdata(info); @@ -868,7 +883,7 @@ static int macsec_create_rx_sa(struct sk_buff *skb, struct genl_info *info) } if (!attrs[NV_MACSEC_ATTR_IFNAME] || - parse_sa_config(attrs, tb_sa, &rx_sa)) { + parse_sa_config(attrs, tb_sa, &rx_sa, &pkcs)) { dev_err(dev, "%s: failed to parse nlattrs", __func__); ret = -EINVAL; goto exit; @@ -894,8 +909,8 @@ static int macsec_create_rx_sa(struct sk_buff *skb, struct genl_info *info) ret = -EINVAL; goto exit; } -#endif /* MACSEC_KEY_PROGRAM */ rx_sa.flags = OSI_CREATE_SA; +#endif /* MACSEC_KEY_PROGRAM */ mutex_lock(&macsec_pdata->lock); ret = osi_macsec_config(pdata->osi_core, &rx_sa, OSI_ENABLE, @@ -914,12 +929,12 @@ static int macsec_create_rx_sa(struct sk_buff *skb, struct genl_info *info) table_config->index = kt_idx; kt_config.flags |= OSI_LUT_FLAGS_ENTRY_VALID; - for (i = 0; i < OSI_KEY_LEN_128; i++) { + for (i = 0; i < OSI_KEY_LEN_256; i++) { kt_config.entry.sak[i] = rx_sa.sak[i]; } - ret = macsec_tz_kt_config(pdata, OSI_MACSEC_CMD_TZ_CONFIG, &kt_config, - info); + ret = macsec_tz_kt_config(pdata, NV_MACSEC_CMD_TZ_CONFIG, &kt_config, + info, &pkcs); if (ret < 0) { dev_err(dev, "%s: failed to program SAK through TZ %d", __func__, ret); @@ -937,7 +952,7 @@ static int macsec_en_rx_sa(struct sk_buff *skb, struct genl_info *info) struct nlattr **attrs = info->attrs; struct macsec_priv_data *macsec_pdata; struct ether_priv_data *pdata; - struct osi_macsec_sc_info rx_sa; + struct osi_macsec_sc_info rx_sa = {0}; struct nlattr *tb_sa[NUM_NV_MACSEC_SA_ATTR]; int ret = 0; unsigned short kt_idx; @@ -960,17 +975,14 @@ static int macsec_en_rx_sa(struct sk_buff *skb, struct genl_info *info) } if (!attrs[NV_MACSEC_ATTR_IFNAME] || - parse_sa_config(attrs, tb_sa, &rx_sa)) { + parse_sa_config(attrs, tb_sa, &rx_sa, NULL)) { dev_err(dev, "%s: failed to parse nlattrs", 
__func__); ret = -EINVAL; goto exit; } - dev_err(dev, "%s: Enable receive SA", __func__); - rx_sa.pn_window = macsec_pdata->pn_window; rx_sa.flags = OSI_ENABLE_SA; - mutex_lock(&macsec_pdata->lock); ret = osi_macsec_config(pdata->osi_core, &rx_sa, OSI_ENABLE, OSI_CTLR_SEL_RX, &kt_idx); @@ -980,7 +992,8 @@ static int macsec_en_rx_sa(struct sk_buff *skb, struct genl_info *info) goto exit; } mutex_unlock(&macsec_pdata->lock); - + /* Update the macsec pdata when AN is enabled */ + macsec_pdata->macsec_rx_an_map |= ((1U) << (rx_sa.curr_an & 0xFU)); exit: PRINT_EXIT(); return ret; @@ -991,7 +1004,7 @@ static int macsec_dis_tx_sa(struct sk_buff *skb, struct genl_info *info) struct nlattr **attrs = info->attrs; struct macsec_priv_data *macsec_pdata; struct ether_priv_data *pdata; - struct osi_macsec_sc_info tx_sa; + struct osi_macsec_sc_info tx_sa = {0}; struct nlattr *tb_sa[NUM_NV_MACSEC_SA_ATTR]; int ret = 0; unsigned short kt_idx; @@ -1000,6 +1013,7 @@ static int macsec_dis_tx_sa(struct sk_buff *skb, struct genl_info *info) struct osi_macsec_kt_config kt_config = {0}; struct osi_macsec_table_config *table_config; #endif /* !MACSEC_KEY_PROGRAM */ + struct nvpkcs_data pkcs = {0}; PRINT_ENTRY(); macsec_pdata = genl_to_macsec_pdata(info); @@ -1018,7 +1032,7 @@ static int macsec_dis_tx_sa(struct sk_buff *skb, struct genl_info *info) } if (!attrs[NV_MACSEC_ATTR_IFNAME] || - parse_sa_config(attrs, tb_sa, &tx_sa)) { + parse_sa_config(attrs, tb_sa, &tx_sa, &pkcs)) { dev_err(dev, "%s: failed to parse nlattrs", __func__); ret = -EINVAL; goto exit; @@ -1034,8 +1048,6 @@ static int macsec_dis_tx_sa(struct sk_buff *skb, struct genl_info *info) tx_sa.curr_an, tx_sa.next_pn); dev_info(dev, "\tkey: " KEYSTR, KEY2STR(tx_sa.sak)); - tx_sa.flags = OSI_DISABLE_SA; - mutex_lock(&macsec_pdata->lock); ret = osi_macsec_config(pdata->osi_core, &tx_sa, OSI_DISABLE, OSI_CTLR_SEL_TX, &kt_idx); @@ -1052,8 +1064,8 @@ static int macsec_dis_tx_sa(struct sk_buff *skb, struct genl_info *info) 
table_config->rw = OSI_LUT_WRITE; table_config->index = kt_idx; - ret = macsec_tz_kt_config(pdata, OSI_MACSEC_CMD_TZ_CONFIG, &kt_config, - info); + ret = macsec_tz_kt_config(pdata, NV_MACSEC_CMD_TZ_CONFIG, &kt_config, + info, &pkcs); if (ret < 0) { dev_err(dev, "%s: failed to program SAK through TZ %d", __func__, ret); @@ -1061,6 +1073,8 @@ static int macsec_dis_tx_sa(struct sk_buff *skb, struct genl_info *info) } #endif /* !MACSEC_KEY_PROGRAM */ + /* Update the macsec pdata when AN is disbled */ + macsec_pdata->macsec_tx_an_map &= ~((1U) << (tx_sa.curr_an & 0xFU)); exit: PRINT_EXIT(); return ret; @@ -1071,7 +1085,7 @@ static int macsec_create_tx_sa(struct sk_buff *skb, struct genl_info *info) struct nlattr **attrs = info->attrs; struct macsec_priv_data *macsec_pdata; struct ether_priv_data *pdata; - struct osi_macsec_sc_info tx_sa; + struct osi_macsec_sc_info tx_sa = {0}; struct nlattr *tb_sa[NUM_NV_MACSEC_SA_ATTR]; int ret = 0; unsigned short kt_idx; @@ -1081,6 +1095,7 @@ static int macsec_create_tx_sa(struct sk_buff *skb, struct genl_info *info) struct osi_macsec_kt_config kt_config = {0}; struct osi_macsec_table_config *table_config; #endif /* !MACSEC_KEY_PROGRAM */ + struct nvpkcs_data pkcs = {0}; PRINT_ENTRY(); macsec_pdata = genl_to_macsec_pdata(info); @@ -1099,7 +1114,7 @@ static int macsec_create_tx_sa(struct sk_buff *skb, struct genl_info *info) } if (!attrs[NV_MACSEC_ATTR_IFNAME] || - parse_sa_config(attrs, tb_sa, &tx_sa)) { + parse_sa_config(attrs, tb_sa, &tx_sa, &pkcs)) { dev_err(dev, "%s: failed to parse nlattrs", __func__); ret = -EINVAL; goto exit; @@ -1115,8 +1130,9 @@ static int macsec_create_tx_sa(struct sk_buff *skb, struct genl_info *info) tx_sa.sci[4], tx_sa.sci[5], tx_sa.sci[6], tx_sa.sci[7], tx_sa.curr_an, tx_sa.next_pn); dev_info(dev, "\tkey: " KEYSTR, KEY2STR(tx_sa.sak)); - tx_sa.flags = OSI_CREATE_SA; + #ifdef MACSEC_KEY_PROGRAM + tx_sa.flags = OSI_CREATE_SA; ret = hkey_generation(tx_sa.sak, tx_sa.hkey); if (ret != 0) { dev_err(dev, "%s: 
failed to Generate HKey", __func__); @@ -1142,12 +1158,12 @@ static int macsec_create_tx_sa(struct sk_buff *skb, struct genl_info *info) table_config->index = kt_idx; kt_config.flags |= OSI_LUT_FLAGS_ENTRY_VALID; - for (i = 0; i < OSI_KEY_LEN_128; i++) { + for (i = 0; i < OSI_KEY_LEN_256; i++) { kt_config.entry.sak[i] = tx_sa.sak[i]; } - ret = macsec_tz_kt_config(pdata, OSI_MACSEC_CMD_TZ_CONFIG, &kt_config, - info); + ret = macsec_tz_kt_config(pdata, NV_MACSEC_CMD_TZ_CONFIG, &kt_config, + info, &pkcs); if (ret < 0) { dev_err(dev, "%s: failed to program SAK through TZ %d", __func__, ret); @@ -1165,7 +1181,7 @@ static int macsec_en_tx_sa(struct sk_buff *skb, struct genl_info *info) struct nlattr **attrs = info->attrs; struct macsec_priv_data *macsec_pdata; struct ether_priv_data *pdata; - struct osi_macsec_sc_info tx_sa; + struct osi_macsec_sc_info tx_sa = {0}; struct nlattr *tb_sa[NUM_NV_MACSEC_SA_ATTR]; int ret = 0; unsigned short kt_idx; @@ -1188,16 +1204,14 @@ static int macsec_en_tx_sa(struct sk_buff *skb, struct genl_info *info) } if (!attrs[NV_MACSEC_ATTR_IFNAME] || - parse_sa_config(attrs, tb_sa, &tx_sa)) { + parse_sa_config(attrs, tb_sa, &tx_sa, NULL)) { dev_err(dev, "%s: failed to parse nlattrs", __func__); ret = -EINVAL; goto exit; } - dev_info(dev, "%s: ENable Transmit SA", __func__); tx_sa.pn_window = macsec_pdata->pn_window; tx_sa.flags = OSI_ENABLE_SA; - mutex_lock(&macsec_pdata->lock); ret = osi_macsec_config(pdata->osi_core, &tx_sa, OSI_ENABLE, OSI_CTLR_SEL_TX, &kt_idx); @@ -1208,7 +1222,8 @@ static int macsec_en_tx_sa(struct sk_buff *skb, struct genl_info *info) } mutex_unlock(&macsec_pdata->lock); - + /* Update the macsec pdata when AN is enabled */ + macsec_pdata->macsec_tx_an_map |= ((1U) << (tx_sa.curr_an & 0xFU)); exit: PRINT_EXIT(); return ret; @@ -1315,11 +1330,12 @@ static int macsec_init(struct sk_buff *skb, struct genl_info *info) goto exit; } mutex_lock(&macsec_pdata->lock); - - if (macsec_pdata->next_supp_idx >= OSI_MAX_NUM_SC) { + /* 
only one supplicant is allowed per VF */ + if (macsec_pdata->next_supp_idx >= MAX_SUPPLICANTS_ALLOWED) { ret = -EPROTO; mutex_unlock(&macsec_pdata->lock); - dev_err(dev, "%s: Reached max supported supplicants", __func__); + dev_err(dev, "%s: Reached max supported supplicants %u", __func__, + macsec_pdata->next_supp_idx); goto exit; } @@ -1345,6 +1361,8 @@ static int macsec_init(struct sk_buff *skb, struct genl_info *info) ret = -EPROTO; goto exit; } + macsec_pdata->macsec_rx_an_map = 0U; + macsec_pdata->macsec_tx_an_map = 0U; done: atomic_inc(&macsec_pdata->ref_count); dev_info(dev, "%s: ref_count %d", __func__, @@ -1391,7 +1409,13 @@ static int macsec_set_replay_prot(struct sk_buff *skb, struct genl_info *info) goto exit; } - macsec_pdata->pn_window = window; + /* If Replay protection is disabled from supplicant use maximum + * PN window as replay protecion is already enabled in macsec_init + */ + if (replay_prot == OSI_ENABLE) + macsec_pdata->pn_window = window; + else + macsec_pdata->pn_window = OSI_PN_MAX_DEFAULT; exit: PRINT_EXIT(); @@ -1466,16 +1490,6 @@ static const struct genl_ops nv_macsec_genl_ops[] = { }, }; -static struct genl_family nv_macsec_fam = { - .name = NV_MACSEC_GENL_NAME, - .hdrsize = 0, - .version = NV_MACSEC_GENL_VERSION, - .maxattr = NV_MACSEC_ATTR_MAX, - .module = THIS_MODULE, - .ops = nv_macsec_genl_ops, - .n_ops = ARRAY_SIZE(nv_macsec_genl_ops), -}; - void macsec_remove(struct ether_priv_data *pdata) { struct macsec_priv_data *macsec_pdata = NULL; @@ -1503,9 +1517,9 @@ void macsec_remove(struct ether_priv_data *pdata) } /* Unregister generic netlink */ - if (is_nv_macsec_fam_registered == OSI_ENABLE) { - genl_unregister_family(&nv_macsec_fam); - is_nv_macsec_fam_registered = OSI_DISABLE; + if (macsec_pdata->is_nv_macsec_fam_registered == OSI_ENABLE) { + genl_unregister_family(&macsec_pdata->nv_macsec_fam); + macsec_pdata->is_nv_macsec_fam_registered = OSI_DISABLE; } /* Release platform resources */ @@ -1544,7 +1558,7 @@ int 
macsec_probe(struct ether_priv_data *pdata) tz_addr = (res->start - MACSEC_SIZE); #endif } else { - /* MACsec not enabled in DT, nothing more to do */ + /* MACsec not supported per DT config, nothing more to do */ osi_core->macsec_base = NULL; osi_core->tz_base = NULL; pdata->macsec_pdata = NULL; @@ -1571,6 +1585,17 @@ int macsec_probe(struct ether_priv_data *pdata) } macsec_pdata->ether_pdata = pdata; pdata->macsec_pdata = macsec_pdata; + + /* Read if macsec is enabled in DT */ + ret = of_property_read_u32(np, "nvidia,macsec-enable", + &macsec_pdata->is_macsec_enabled_in_dt); + if ((ret != 0) || (macsec_pdata->is_macsec_enabled_in_dt == 0U)) { + dev_info(dev, + "macsec param in DT is missing or disabled\n"); + ret = 1; + goto init_err; + } + mutex_init(&pdata->macsec_pdata->lock); /* Read MAC instance id and used in TZ api's */ @@ -1600,28 +1625,48 @@ int macsec_probe(struct ether_priv_data *pdata) } /* Enable CAR */ - ret = macsec_enable_car(macsec_pdata); - if (ret < 0) { - dev_err(dev, "Unable to enable macsec clks & reset\n"); - goto car_err; + if (osi_core->use_virtualization == OSI_DISABLE) { + ret = macsec_enable_car(macsec_pdata); + if (ret < 0) { + dev_err(dev, "Unable to enable macsec clks & reset\n"); + goto car_err; + } } /* Register macsec generic netlink ops */ - if (is_nv_macsec_fam_registered == OSI_DISABLE) { - ret = genl_register_family(&nv_macsec_fam); + macsec_pdata->nv_macsec_fam.hdrsize = 0; + macsec_pdata->nv_macsec_fam.version = NV_MACSEC_GENL_VERSION; + macsec_pdata->nv_macsec_fam.maxattr = NV_MACSEC_ATTR_MAX; + macsec_pdata->nv_macsec_fam.module = THIS_MODULE; + macsec_pdata->nv_macsec_fam.ops = nv_macsec_genl_ops; + macsec_pdata->nv_macsec_fam.n_ops = ARRAY_SIZE(nv_macsec_genl_ops); + if (macsec_pdata->is_nv_macsec_fam_registered == OSI_DISABLE) { + if (strlen(netdev_name(pdata->ndev)) >= GENL_NAMSIZ) { + dev_err(dev, "Intf name %s of len %lu exceed nl_family name size\n", + netdev_name(pdata->ndev), + strlen(netdev_name(pdata->ndev))); 
+ ret = -1; + goto genl_err; + } else { + strncpy(macsec_pdata->nv_macsec_fam.name, + netdev_name(pdata->ndev), GENL_NAMSIZ - 1); + } + ret = genl_register_family(&macsec_pdata->nv_macsec_fam); if (ret) { dev_err(dev, "Failed to register GENL ops %d\n", ret); goto genl_err; } - is_nv_macsec_fam_registered = OSI_ENABLE; + macsec_pdata->is_nv_macsec_fam_registered = OSI_ENABLE; } PRINT_EXIT(); return ret; genl_err: - macsec_disable_car(macsec_pdata); + if (osi_core->use_virtualization == OSI_DISABLE) { + macsec_disable_car(macsec_pdata); + } car_err: macsec_release_platform_res(macsec_pdata); init_err: @@ -1646,7 +1691,7 @@ int macsec_probe(struct ether_priv_data *pdata) static int macsec_tz_kt_config(struct ether_priv_data *pdata, unsigned char cmd, struct osi_macsec_kt_config *const kt_config, - struct genl_info *const info) + struct genl_info *const info, struct nvpkcs_data *pkcs) { struct sk_buff *msg; struct nlattr *nest; @@ -1665,12 +1710,8 @@ static int macsec_tz_kt_config(struct ether_priv_data *pdata, goto fail; } - /* remap osi tz cmd to netlink cmd */ - if (cmd == OSI_MACSEC_CMD_TZ_CONFIG) { - cmd = NV_MACSEC_CMD_TZ_CONFIG; - } else if (cmd == OSI_MACSEC_CMD_TZ_KT_RESET) { - cmd = NV_MACSEC_CMD_TZ_KT_RESET; - } else { + if (cmd != NV_MACSEC_CMD_TZ_KT_RESET && + cmd != NV_MACSEC_CMD_TZ_CONFIG) { dev_err(dev, "%s: Wrong TZ cmd %d\n", __func__, cmd); ret = -1; goto fail; @@ -1683,7 +1724,7 @@ static int macsec_tz_kt_config(struct ether_priv_data *pdata, goto fail; } - msg_head = genlmsg_put_reply(msg, info, &nv_macsec_fam, 0, cmd); + msg_head = genlmsg_put_reply(msg, info, &macsec_pdata->nv_macsec_fam, 0, cmd); if (msg_head == NULL) { dev_err(dev, "unable to get replyhead\n"); ret = -EINVAL; @@ -1721,9 +1762,20 @@ static int macsec_tz_kt_config(struct ether_priv_data *pdata, kt_config->table_config.rw); nla_put_u8(msg, NV_MACSEC_TZ_ATTR_INDEX, kt_config->table_config.index); + nla_put_u32(msg, NV_MACSEC_TZ_ATTR_FLAG, kt_config->flags); +#ifdef NVPKCS_MACSEC + 
if (pkcs) { + nla_put(msg, NV_MACSEC_TZ_PKCS_KEY_WRAP, + sizeof(pkcs->nv_key), + pkcs->nv_key); + nla_put_u64_64bit(msg, NV_MACSEC_TZ_PKCS_KEK_HANDLE, + pkcs->nv_kek, + NL_POLICY_TYPE_ATTR_PAD); + } +#else nla_put(msg, NV_MACSEC_TZ_ATTR_KEY, OSI_KEY_LEN_256, kt_config->entry.sak); - nla_put_u32(msg, NV_MACSEC_TZ_ATTR_FLAG, kt_config->flags); +#endif /* NVPKCS_MACSEC */ nla_nest_end(msg, nest); } genlmsg_end(msg, msg_head); @@ -1778,7 +1830,7 @@ static int macsec_get_tx_next_pn(struct sk_buff *skb, struct genl_info *info) } if (!attrs[NV_MACSEC_ATTR_IFNAME] || - parse_sa_config(attrs, tb_sa, &tx_sa)) { + parse_sa_config(attrs, tb_sa, &tx_sa, NULL)) { dev_err(dev, "%s: failed to parse nlattrs", __func__); ret = -EINVAL; goto exit; @@ -1809,7 +1861,7 @@ static int macsec_get_tx_next_pn(struct sk_buff *skb, struct genl_info *info) goto exit; } - msg_head = genlmsg_put_reply(msg, info, &nv_macsec_fam, 0, cmd); + msg_head = genlmsg_put_reply(msg, info, &macsec_pdata->nv_macsec_fam, 0, cmd); if (!msg_head) { dev_err(dev, "unable to get replyhead\n"); ret = -EINVAL; diff --git a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/macsec.h b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/macsec.h index a2f9e5bf41..916ff1216b 100644 --- a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/macsec.h +++ b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/macsec.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -39,13 +39,21 @@ */ #define BYP_LUT_INPUTS 1 +/** + * @brief MACSEC SECTAG + ICV + 2B ethertype adds up to 34B + */ +#define MACSEC_TAG_ICV_LEN 34U + /** * @brief Size of Macsec IRQ name. 
*/ #define MACSEC_IRQ_NAME_SZ 32 -/* TODO - include name of driver interface as well */ -#define NV_MACSEC_GENL_NAME "nv_macsec" +/** + * @brief Maximum number of supplicants allowed per VF + */ +#define MAX_SUPPLICANTS_ALLOWED 1 + #define NV_MACSEC_GENL_VERSION 1 #ifdef MACSEC_KEY_PROGRAM @@ -58,6 +66,13 @@ #define KEYSTR "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x \ %02x %02x %02x %02x %02x %02x" +/* For 128 bit SAK, key len is 16 bytes, wrapped key len is 24 bytes + * and for 256 SAK, key len is 32 bytes, wrapped key len is 40 bytes + */ +#define NV_SAK_WRAPPED_LEN 40 +/* PKCS KEK CK_OBJECT_HANDLE is u64 type */ +#define NV_KEK_HANDLE_SIZE 8 + /* keep the same enum definition in nv macsec supplicant driver */ enum nv_macsec_sa_attrs { NV_MACSEC_SA_ATTR_UNSPEC, @@ -65,7 +80,12 @@ enum nv_macsec_sa_attrs { NV_MACSEC_SA_ATTR_AN, NV_MACSEC_SA_ATTR_PN, NV_MACSEC_SA_ATTR_LOWEST_PN, +#ifdef NVPKCS_MACSEC + NV_MACSEC_SA_PKCS_KEY_WRAP, + NV_MACSEC_SA_PKCS_KEK_HANDLE, +#else NV_MACSEC_SA_ATTR_KEY, +#endif /* NVPKCS_MACSEC */ __NV_MACSEC_SA_ATTR_END, NUM_NV_MACSEC_SA_ATTR = __NV_MACSEC_SA_ATTR_END, NV_MACSEC_SA_ATTR_MAX = __NV_MACSEC_SA_ATTR_END - 1, @@ -77,7 +97,12 @@ enum nv_macsec_tz_attrs { NV_MACSEC_TZ_ATTR_CTRL, NV_MACSEC_TZ_ATTR_RW, NV_MACSEC_TZ_ATTR_INDEX, +#ifdef NVPKCS_MACSEC + NV_MACSEC_TZ_PKCS_KEY_WRAP, + NV_MACSEC_TZ_PKCS_KEK_HANDLE, +#else NV_MACSEC_TZ_ATTR_KEY, +#endif /* NVPKCS_MACSEC */ NV_MACSEC_TZ_ATTR_FLAG, __NV_MACSEC_TZ_ATTR_END, NUM_NV_MACSEC_TZ_ATTR = __NV_MACSEC_TZ_ATTR_END, @@ -115,8 +140,14 @@ static const struct nla_policy nv_macsec_sa_genl_policy[NUM_NV_MACSEC_SA_ATTR] = [NV_MACSEC_SA_ATTR_AN] = { .type = NLA_U8 }, [NV_MACSEC_SA_ATTR_PN] = { .type = NLA_U32 }, [NV_MACSEC_SA_ATTR_LOWEST_PN] = { .type = NLA_U32 }, +#ifdef NVPKCS_MACSEC + [NV_MACSEC_SA_PKCS_KEY_WRAP] = { .type = NLA_BINARY, + .len = NV_SAK_WRAPPED_LEN,}, + [NV_MACSEC_SA_PKCS_KEK_HANDLE] = { .type = NLA_U64 }, +#else [NV_MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY, - .len = 
OSI_KEY_LEN_128,}, + .len = OSI_KEY_LEN_256,}, +#endif /* NVPKCS_MACSEC */ }; static const struct nla_policy nv_macsec_tz_genl_policy[NUM_NV_MACSEC_TZ_ATTR] = { @@ -124,8 +155,14 @@ static const struct nla_policy nv_macsec_tz_genl_policy[NUM_NV_MACSEC_TZ_ATTR] = [NV_MACSEC_TZ_ATTR_CTRL] = { .type = NLA_U8 }, /* controller Tx or Rx */ [NV_MACSEC_TZ_ATTR_RW] = { .type = NLA_U8 }, [NV_MACSEC_TZ_ATTR_INDEX] = { .type = NLA_U8 }, +#ifdef NVPKCS_MACSEC + [NV_MACSEC_SA_PKCS_KEY_WRAP] = { .type = NLA_BINARY, + .len = NV_SAK_WRAPPED_LEN,}, + [NV_MACSEC_SA_PKCS_KEK_HANDLE] = { .type = NLA_U64 }, +#else [NV_MACSEC_TZ_ATTR_KEY] = { .type = NLA_BINARY, .len = OSI_KEY_LEN_256 }, +#endif /* NVPKCS_MACSEC */ [NV_MACSEC_TZ_ATTR_FLAG] = { .type = NLA_U32 }, }; @@ -177,6 +214,18 @@ struct macsec_supplicant_data { unsigned int cipher; }; +/** + * @brief MACsec supplicant pkcs data structure + */ +struct nvpkcs_data { + /** wrapped key */ + u8 nv_key[NV_SAK_WRAPPED_LEN]; + /** wrapped key length */ + int nv_key_len; + /** pkcs KEK handle(CK_OBJECT_HANDLE ) is u64 */ + u64 nv_kek; +}; + /** * @brief MACsec private data structure */ @@ -207,8 +256,6 @@ struct macsec_priv_data { unsigned int protect_frames; /** MACsec enabled flags for Tx/Rx controller status */ unsigned int enabled; - /** MACsec enabled flags for Tx/Rx controller status before Suspend */ - unsigned int enabled_before_suspend; /** MACsec Rx PN Window */ unsigned int pn_window; /** MACsec controller init reference count */ @@ -221,6 +268,16 @@ struct macsec_priv_data { struct mutex lock; /** macsec hw instance id */ unsigned int id; + /** Macsec enable flag in DT */ + unsigned int is_macsec_enabled_in_dt; + /** Context family name */ + struct genl_family nv_macsec_fam; + /** Flag to check if nv macsec nl registered */ + unsigned int is_nv_macsec_fam_registered; + /** Macsec TX currently enabled AN */ + unsigned int macsec_tx_an_map; + /** Macsec RX currently enabled AN */ + unsigned int macsec_rx_an_map; }; int 
macsec_probe(struct ether_priv_data *pdata); @@ -231,13 +288,13 @@ int macsec_close(struct macsec_priv_data *macsec_pdata); int macsec_suspend(struct macsec_priv_data *macsec_pdata); int macsec_resume(struct macsec_priv_data *macsec_pdata); -#ifdef MACSEC_DEBUG +#ifdef DEBUG_MACSEC #define PRINT_ENTRY() (printk(KERN_DEBUG "-->%s()\n", __func__)) #define PRINT_EXIT() (printk(KERN_DEBUG "<--%s()\n", __func__)) #else #define PRINT_ENTRY() #define PRINT_EXIT() -#endif /* MACSEC_DEBUG */ +#endif /* DEBUG_MACSEC */ #endif /* INCLUDED_MACSEC_H */ diff --git a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/osd.c b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/osd.c index 3e7f6364da..58162c7971 100644 --- a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/osd.c +++ b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/osd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -215,7 +215,9 @@ static inline int ether_alloc_skb(struct ether_priv_data *pdata, dma_addr_t dma_addr; #endif unsigned long val; - if ((rx_swcx->flags & OSI_RX_SWCX_REUSE) == OSI_RX_SWCX_REUSE) { + + if (((rx_swcx->flags & OSI_RX_SWCX_REUSE) == OSI_RX_SWCX_REUSE) && + (rx_swcx->buf_virt_addr != pdata->osi_dma->resv_buf_virt_addr)) { /* Skip buffer allocation and DMA mapping since * PTP software context will have valid buffer and * DMA addresses so use them as is. 
@@ -232,8 +234,8 @@ static inline int ether_alloc_skb(struct ether_priv_data *pdata, rx_swcx->buf_virt_addr = pdata->osi_dma->resv_buf_virt_addr; rx_swcx->buf_phy_addr = pdata->osi_dma->resv_buf_phy_addr; rx_swcx->flags |= OSI_RX_SWCX_BUF_VALID; - val = pdata->osi_core->xstats.re_alloc_rxbuf_failed[chan]; - pdata->osi_core->xstats.re_alloc_rxbuf_failed[chan] = + val = pdata->xstats.re_alloc_rxbuf_failed[chan]; + pdata->xstats.re_alloc_rxbuf_failed[chan] = osi_update_stats_counter(val, 1UL); return 0; } @@ -254,8 +256,8 @@ static inline int ether_alloc_skb(struct ether_priv_data *pdata, rx_swcx->buf_virt_addr = pdata->osi_dma->resv_buf_virt_addr; rx_swcx->buf_phy_addr = pdata->osi_dma->resv_buf_phy_addr; rx_swcx->flags |= OSI_RX_SWCX_BUF_VALID; - val = pdata->osi_core->xstats.re_alloc_rxbuf_failed[chan]; - pdata->osi_core->xstats.re_alloc_rxbuf_failed[chan] = + val = pdata->xstats.re_alloc_rxbuf_failed[chan]; + pdata->xstats.re_alloc_rxbuf_failed[chan] = osi_update_stats_counter(val, 1UL); return 0; } @@ -881,6 +883,37 @@ static void osd_core_printf(struct osi_core_priv_data *osi_core, } #endif +void ether_restart_lane_bringup_task(struct tasklet_struct *t) +{ + struct ether_priv_data *pdata = from_tasklet(pdata, t, lane_restart_task); + + if (pdata->tx_start_stop == OSI_DISABLE) { + netif_tx_lock(pdata->ndev); + netif_carrier_off(pdata->ndev); + netif_tx_stop_all_queues(pdata->ndev); + netif_tx_unlock(pdata->ndev); + schedule_delayed_work(&pdata->set_speed_work, msecs_to_jiffies(500)); + if (netif_msg_drv(pdata)) { + netdev_info(pdata->ndev, "Disable network Tx Queue\n"); + } + } else if (pdata->tx_start_stop == OSI_ENABLE) { + netif_tx_lock(pdata->ndev); + netif_tx_start_all_queues(pdata->ndev); + netif_tx_unlock(pdata->ndev); + if (netif_msg_drv(pdata)) { + netdev_info(pdata->ndev, "Enable network Tx Queue\n"); + } + } +} + +static void osd_restart_lane_bringup(void *priv, unsigned int en_disable) +{ + struct ether_priv_data *pdata = (struct ether_priv_data 
*)priv; + + pdata->tx_start_stop = en_disable; + tasklet_hi_schedule(&pdata->lane_restart_task); +} + void ether_assign_osd_ops(struct osi_core_priv_data *osi_core, struct osi_dma_priv_data *osi_dma) { @@ -892,7 +925,7 @@ void ether_assign_osd_ops(struct osi_core_priv_data *osi_core, #ifdef OSI_DEBUG osi_core->osd_ops.printf = osd_core_printf; #endif - + osi_core->osd_ops.restart_lane_bringup = osd_restart_lane_bringup; osi_dma->osd_ops.transmit_complete = osd_transmit_complete; osi_dma->osd_ops.receive_packet = osd_receive_packet; osi_dma->osd_ops.realloc_buf = osd_realloc_buf; @@ -924,64 +957,49 @@ int osd_ivc_send_cmd(void *priv, ivc_msg_common_t *ivc_buf, unsigned int len) struct ether_ivc_ctxt *ictxt = &pdata->ictxt; struct tegra_hv_ivc_cookie *ivck = (struct tegra_hv_ivc_cookie *) ictxt->ivck; - int dcnt = IVC_CHANNEL_TIMEOUT_CNT; - int is_atomic = 0; + int status = -1; + unsigned long flags = 0; + if (len > ETHER_MAX_IVC_BUF) { dev_err(pdata->dev, "Invalid IVC len\n"); return -1; } - ivc_buf->status = -1; - if (in_atomic()) { - preempt_enable(); - is_atomic = 1; - } - - mutex_lock(&ictxt->ivck_lock); ivc_buf->count = cnt++; + + raw_spin_lock_irqsave(&ictxt->ivck_lock, flags); + /* Waiting for the channel to be ready */ - while (tegra_hv_ivc_channel_notified(ivck) != 0){ - osd_msleep(1); - dcnt--; - if (!dcnt) { - dev_err(pdata->dev, "IVC channel timeout\n"); - goto fail; - } + ret = readx_poll_timeout_atomic(tegra_hv_ivc_channel_notified, ivck, + status, status == 0, 10, IVC_WAIT_TIMEOUT_CNT); + if (ret == -ETIMEDOUT) { + dev_err(pdata->dev, "IVC channel timeout\n"); + goto fail; } /* Write the current message for the ethernet server */ ret = tegra_hv_ivc_write(ivck, ivc_buf, len); if (ret != len) { - dev_err(pdata->dev, "IVC write len %d ret %d cmd %d failed\n", - len, ret, ivc_buf->cmd); + dev_err(pdata->dev, "IVC write with len %d ret %d cmd %d ioctlcmd %d failed\n", + len, ret, ivc_buf->cmd, ivc_buf->data.ioctl_data.cmd); goto fail; } - dcnt = 
IVC_READ_TIMEOUT_CNT; - while ((!tegra_hv_ivc_can_read(ictxt->ivck))) { - if (!wait_for_completion_timeout(&ictxt->msg_complete, - IVC_WAIT_TIMEOUT)) { - ret = -ETIMEDOUT; - goto fail; - } - - dcnt--; - if (!dcnt) { - dev_err(pdata->dev, "IVC read timeout\n"); - break; - } + ret = readx_poll_timeout_atomic(tegra_hv_ivc_can_read, ictxt->ivck, + status, status, 10, IVC_WAIT_TIMEOUT_CNT); + if (ret == -ETIMEDOUT) { + dev_err(pdata->dev, "IVC read timeout status %d\n", status); + goto fail; } ret = tegra_hv_ivc_read(ivck, ivc_buf, len); if (ret < 0) { - dev_err(pdata->dev, "IVC read failed: %d\n", ret); + dev_err(pdata->dev, "IVC read failed: %d cmd %d ioctlcmd %d\n", + ret, ivc_buf->cmd, ivc_buf->data.ioctl_data.cmd); } ret = ivc_buf->status; fail: - mutex_unlock(&ictxt->ivck_lock); - if (is_atomic) { - preempt_disable(); - } + raw_spin_unlock_irqrestore(&ictxt->ivck_lock, flags); return ret; } diff --git a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ptp.c b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ptp.c index 3993e307b7..ecf7fe6d32 100644 --- a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ptp.c +++ b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/ptp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -377,6 +377,9 @@ static void ether_config_slot_function(struct ether_priv_data *pdata, u32 set) sizeof(struct osi_core_avb_algorithm)); qinx = osi_core->mtl_queues[i]; ioctl_data.avb.qindex = qinx; + /* For EQOS hardware library code use internally SP(0) and + For MGBE hardware library code use internally ETS(2) if + algo != CBS. */ ioctl_data.avb.algo = OSI_MTL_TXQ_AVALG_SP; ioctl_data.avb.oper_mode = (set == OSI_ENABLE) ?
OSI_MTL_QUEUE_AVB : @@ -402,7 +405,9 @@ int ether_handle_hwtstamp_ioctl(struct ether_priv_data *pdata, { struct osi_core_priv_data *osi_core = pdata->osi_core; struct osi_dma_priv_data *osi_dma = pdata->osi_dma; +#ifdef CONFIG_TEGRA_PTP_NOTIFIER struct net_device *ndev = pdata->ndev; +#endif struct osi_ioctl ioctl_data = {}; struct hwtstamp_config config; unsigned int hwts_rx_en = 1; diff --git a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/sysfs.c b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/sysfs.c index a0b12c33f8..7405de9ca8 100644 --- a/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/sysfs.c +++ b/kernel/nvidia/drivers/net/ethernet/nvidia/nvethernet/sysfs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -17,6 +17,10 @@ #include "ether_linux.h" #include "macsec.h" +#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ)) +#include +#endif + #ifdef CONFIG_DEBUG_FS /* As per IAS Docs */ #define EOQS_MAX_REGISTER_ADDRESS 0x12FC @@ -182,6 +186,42 @@ static ssize_t ether_mac_loopback_store(struct device *dev, } #ifdef MACSEC_SUPPORT + +/** + * @brief Shows the current setting of MACsec AN status + * + * Algorithm: Display the current MACsec AN enable status + * + * @param[in] dev: Device data. 
+ * @param[in] attr: Device attribute + * @param[in] buf: Buffer to store the current macsec an status + */ +static ssize_t macsec_an_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev); + struct ether_priv_data *pdata = netdev_priv(ndev); + struct macsec_priv_data *macsec_pdata = pdata->macsec_pdata; + unsigned int macsec_status = 0; + + if ((macsec_pdata->macsec_tx_an_map != 0U) && + (macsec_pdata->macsec_rx_an_map != 0U)) { + macsec_status = OSI_ENABLE; + } + + return scnprintf(buf, PAGE_SIZE, "%s\n", + (macsec_status == OSI_ENABLE) ? + "1" : "0"); +} + +/** + * @brief Sysfs attribute for MACsec AN status + * + */ +static DEVICE_ATTR(macsec_an_status, (S_IRUGO | S_IWUSR), + macsec_an_status_show, + NULL); + /** * @brief Shows the current setting of MACsec controllers enabled * @@ -337,6 +377,7 @@ static DEVICE_ATTR(macsec_cipher, (S_IRUGO | S_IWUSR), macsec_cipher_show, macsec_cipher_store); +#ifdef DEBUG_MACSEC /** * @brief Shows the current setting of MACsec loopback * @@ -421,8 +462,29 @@ static ssize_t macsec_loopback_store(struct device *dev, static DEVICE_ATTR(macsec_loopback, (S_IRUGO | S_IWUSR), macsec_loopback_show, macsec_loopback_store); +#endif /* DEBUG_MACSEC */ #ifdef HSI_SUPPORT +#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ)) +static int hsi_inject_err_fsi(unsigned int inst_id, + struct epl_error_report_frame error_report, + void *data) +{ + struct ether_priv_data *pdata = (struct ether_priv_data *)data; + struct osi_core_priv_data *osi_core = pdata->osi_core; + struct osi_ioctl ioctl_data = {}; + int ret; + + ioctl_data.cmd = OSI_CMD_HSI_INJECT_ERR; + ioctl_data.arg1_u32 = error_report.error_code; + ret = osi_handle_ioctl(osi_core, &ioctl_data); + if (ret < 0) + dev_err(pdata->dev, "Fail to inject error\n"); + + return ret; +} +#endif + /** * @brief Shows HSI feature enabled status * @@ -439,6 +501,11 @@ static ssize_t hsi_enable_show(struct device *dev,
struct ether_priv_data *pdata = netdev_priv(ndev); struct osi_core_priv_data *osi_core = pdata->osi_core; + if (osi_core->use_virtualization == OSI_ENABLE) { + dev_err(pdata->dev, "Not supported with Ethernet virtualization enabled\n"); + return 0; + } + return scnprintf(buf, PAGE_SIZE, "%s\n", (osi_core->hsi.enabled == OSI_ENABLE) ? "enabled" : "disabled"); @@ -465,6 +532,14 @@ static ssize_t hsi_enable_store(struct device *dev, struct osi_core_priv_data *osi_core = pdata->osi_core; struct osi_ioctl ioctl_data = {}; int ret = 0; +#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ)) + u32 inst_id = osi_core->instance_id; + u32 ip_type[2] = {IP_EQOS, IP_MGBE}; +#endif + if (osi_core->use_virtualization == OSI_ENABLE) { + dev_err(pdata->dev, "Not supported with Ethernet virtualization enabled\n"); + return size; + } if (!netif_running(ndev)) { dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n"); @@ -481,6 +556,17 @@ static ssize_t hsi_enable_store(struct device *dev, } else { osi_core->hsi.enabled = OSI_ENABLE; dev_info(pdata->dev, "HSI Enabled\n"); +#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ)) + if (osi_core->instance_id == OSI_INSTANCE_ID_EQOS) + inst_id = 0; + + ret = hsierrrpt_reg_cb(ip_type[osi_core->mac], inst_id, + hsi_inject_err_fsi, pdata); + if (ret != 0) { + dev_err(pdata->dev, "Err inj callback registration failed: %d", + ret); + } +#endif } } else if (strncmp(buf, "disable", 7) == OSI_NONE) { ioctl_data.arg1_u32 = OSI_DISABLE; @@ -491,6 +577,16 @@ static ssize_t hsi_enable_store(struct device *dev, } else { osi_core->hsi.enabled = OSI_DISABLE; dev_info(pdata->dev, "HSI Disabled\n"); +#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ)) + if (osi_core->instance_id == OSI_INSTANCE_ID_EQOS) + inst_id = 0; + + ret = hsierrrpt_dereg_cb(ip_type[osi_core->mac], inst_id); + if (ret != 0) { + dev_err(pdata->dev, "Err inj callback deregistration failed: %d", + ret); + } +#endif } } else { dev_err(pdata->dev, @@ -1002,7 +1098,7 @@ static DEVICE_ATTR(macsec_mmc_counters, 
(S_IRUGO | S_IWUSR), macsec_mmc_counters_show, NULL); - +#ifdef DEBUG_MACSEC static void dump_dbg_buffers(char **buf_p, unsigned short ctlr_sel, struct osi_core_priv_data *osi_core) { @@ -1018,7 +1114,7 @@ static void dump_dbg_buffers(char **buf_p, unsigned short ctlr_sel, } for (i = 0; i < idx_max; i++) { memset(&dbg_buf_config, OSI_NONE, sizeof(dbg_buf_config)); - dbg_buf_config.rw = OSI_DBG_TBL_READ; + dbg_buf_config.rw = OSI_LUT_READ; dbg_buf_config.ctlr_sel = ctlr_sel; dbg_buf_config.index = i; if (osi_macsec_config_dbg_buf(osi_core, &dbg_buf_config) < 0) { @@ -1037,7 +1133,7 @@ static void dump_dbg_buffers(char **buf_p, unsigned short ctlr_sel, /* reset debug buffer after buf read */ for (i = 0; i < idx_max; i++) { memset(&dbg_buf_config, OSI_NONE, sizeof(dbg_buf_config)); - dbg_buf_config.rw = OSI_DBG_TBL_WRITE; + dbg_buf_config.rw = OSI_LUT_WRITE; dbg_buf_config.ctlr_sel = ctlr_sel; dbg_buf_config.index = i; if (osi_macsec_config_dbg_buf(osi_core, &dbg_buf_config) < 0) { @@ -1131,7 +1227,7 @@ static ssize_t macsec_dbg_events_store(struct device *dev, } } dbg_buf_config.ctlr_sel = controller; - dbg_buf_config.rw = OSI_DBG_TBL_WRITE; + dbg_buf_config.rw = OSI_LUT_WRITE; if (osi_macsec_dbg_events_config(osi_core, &dbg_buf_config) < 0) { dev_err(dev, "%s: Failed to config dbg trigger events\n", __func__); @@ -1153,6 +1249,7 @@ static ssize_t macsec_dbg_events_store(struct device *dev, static DEVICE_ATTR(macsec_dbg_events, (S_IRUGO | S_IWUSR), NULL, macsec_dbg_events_store); +#endif /* DEBUG_MACSEC */ /** * @brief Shows the current SCI LUT configuration @@ -1657,7 +1754,8 @@ static ssize_t macsec_sc_state_lut_store(struct device *dev, struct osi_core_priv_data *osi_core = pdata->osi_core; struct osi_macsec_lut_config lut_config; int index, ctlr; - int ret, curr_an; + int ret; + nveu32_t curr_an; if (!netif_running(ndev)) { dev_err(pdata->dev, "Not Allowed. 
Ether interface is not up\n"); @@ -1672,7 +1770,7 @@ static ssize_t macsec_sc_state_lut_store(struct device *dev, if ((index > OSI_SC_LUT_MAX_INDEX) || (ctlr != OSI_CTLR_SEL_TX && ctlr != OSI_CTLR_SEL_RX) || - (curr_an > OSI_CURR_AN_MAX)) { + (curr_an >= OSI_MAX_NUM_SA)) { dev_err(pdata->dev, "%s:Invalid inputs", __func__); goto exit; } @@ -2626,11 +2724,14 @@ static struct attribute *ether_sysfs_attrs[] = { &dev_attr_macsec_sa_state_lut.attr, &dev_attr_macsec_sc_param_lut.attr, &dev_attr_macsec_cipher.attr, - &dev_attr_macsec_loopback.attr, &dev_attr_macsec_enable.attr, + &dev_attr_macsec_an_status.attr, &dev_attr_macsec_mmc_counters.attr, +#ifdef DEBUG_MACSEC + &dev_attr_macsec_loopback.attr, &dev_attr_macsec_dbg_buffers.attr, &dev_attr_macsec_dbg_events.attr, +#endif /* DEBUG_MACSEC */ #endif /* MACSEC_SUPPORT */ &dev_attr_uphy_gbe_mode.attr, &dev_attr_phy_iface_mode.attr, @@ -3227,22 +3328,24 @@ static void ether_remove_debugfs(struct ether_priv_data *pdata) int ether_sysfs_register(struct ether_priv_data *pdata) { struct device *dev = pdata->dev; - -#ifdef CONFIG_DEBUG_FS int ret = 0; - ret = ether_create_debugfs(pdata); - if (ret < 0) - return ret; +#ifdef CONFIG_DEBUG_FS + /* Intentionally ignored the return value of debugfs + * and continues to initialize the driver even it fails + * to support Linux Production profile + */ + ether_create_debugfs(pdata); #endif + /* Create nvethernet sysfs group under /sys/devices// */ - return sysfs_create_group(&dev->kobj, ðer_attribute_group); + ret = sysfs_create_group(&dev->kobj, ðer_attribute_group); + return ret; } void ether_sysfs_unregister(struct ether_priv_data *pdata) { struct device *dev = pdata->dev; - #ifdef CONFIG_DEBUG_FS ether_remove_debugfs(pdata); #endif diff --git a/kernel/nvidia/drivers/nvpmodel/nvpmodel_emc_cap.c b/kernel/nvidia/drivers/nvpmodel/nvpmodel_emc_cap.c index 72c1046877..2f676c1398 100644 --- a/kernel/nvidia/drivers/nvpmodel/nvpmodel_emc_cap.c +++ 
b/kernel/nvidia/drivers/nvpmodel/nvpmodel_emc_cap.c @@ -3,7 +3,7 @@ * * NVIDIA Tegra Nvpmodel driver for Tegra chips * - * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -285,7 +285,7 @@ static int __init nvpmodel_clk_cap_init(void) continue; } sysfs_attr_init(&(clks[i].attr.attr)); - clks[i].attr.attr.mode = 0660; + clks[i].attr.attr.mode = 0664; clks[i].attr.show = clk_cap_show; clks[i].attr.store = clk_cap_store; if (sysfs_create_file(clk_cap_kobject, diff --git a/kernel/nvidia/drivers/platform/tegra/dce/Makefile b/kernel/nvidia/drivers/platform/tegra/dce/Makefile index dbf92914dd..661c211b08 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/Makefile +++ b/kernel/nvidia/drivers/platform/tegra/dce/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Display Controller Engine code. # diff --git a/kernel/nvidia/drivers/platform/tegra/dce/dce-admin.c b/kernel/nvidia/drivers/platform/tegra/dce/dce-admin.c index b47f9795cf..e4b6ff8d89 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/dce-admin.c +++ b/kernel/nvidia/drivers/platform/tegra/dce/dce-admin.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/nvidia/drivers/platform/tegra/dce/dce-bootstrap.c b/kernel/nvidia/drivers/platform/tegra/dce/dce-bootstrap.c index 665e57c743..3babbb3c40 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/dce-bootstrap.c +++ b/kernel/nvidia/drivers/platform/tegra/dce/dce-bootstrap.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -171,6 +171,7 @@ dce_start_boot_flow(struct tegra_dce *d) } else { d->boot_status |= DCE_FW_BOOT_DONE; dce_info(d, "DCE_BOOT_DONE"); + dce_cond_broadcast_interruptible(&d->dce_bootstrap_done); } exit: diff --git a/kernel/nvidia/drivers/platform/tegra/dce/dce-client-ipc.c b/kernel/nvidia/drivers/platform/tegra/dce/dce-client-ipc.c index bd041c1dae..664101633a 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/dce-client-ipc.c +++ b/kernel/nvidia/drivers/platform/tegra/dce/dce-client-ipc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -136,7 +136,7 @@ int tegra_dce_register_ipc_client(u32 type, { int ret; uint32_t int_type; - struct tegra_dce *d; + struct tegra_dce *d = NULL; struct tegra_dce_client_ipc *cl; u32 handle = DCE_CLIENT_IPC_HANDLE_INVALID; @@ -160,6 +160,18 @@ int tegra_dce_register_ipc_client(u32 type, goto out; } + /* + * Wait for bootstrapping to complete before client IPC registration + */ +#define DCE_IPC_REGISTER_BOOT_WAIT (30U * 1000) + ret = DCE_COND_WAIT_INTERRUPTIBLE_TIMEOUT(&d->dce_bootstrap_done, + dce_is_bootstrap_done(d), + DCE_IPC_REGISTER_BOOT_WAIT); + if (ret) { + dce_info(d, "dce boot wait failed (%d)\n", ret); + goto out; + } + ret = dce_client_ipc_handle_alloc(&handle); if (ret) goto out; diff --git a/kernel/nvidia/drivers/platform/tegra/dce/dce-debug.c b/kernel/nvidia/drivers/platform/tegra/dce/dce-debug.c index 39b37f75a5..d806257857 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/dce-debug.c +++ b/kernel/nvidia/drivers/platform/tegra/dce/dce-debug.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -424,6 +424,9 @@ static ssize_t dbg_dce_boot_status_fops_read(struct file *file, last_status = DCE_BIT(find_first_bit(addr, 32)); switch (last_status) { + case DCE_FW_SUSPENDED: + strcpy(buf, "DCE_FW_SUSPENDED"); + break; case DCE_FW_BOOT_DONE: strcpy(buf, "DCE_FW_BOOT_DONE"); break; diff --git a/kernel/nvidia/drivers/platform/tegra/dce/dce-fsm.c b/kernel/nvidia/drivers/platform/tegra/dce/dce-fsm.c index e6d8a2127d..155ad4b6f4 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/dce-fsm.c +++ b/kernel/nvidia/drivers/platform/tegra/dce/dce-fsm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/nvidia/drivers/platform/tegra/dce/dce-init-deinit.c b/kernel/nvidia/drivers/platform/tegra/dce/dce-init-deinit.c index ba9d95717d..ae9bcc20cf 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/dce-init-deinit.c +++ b/kernel/nvidia/drivers/platform/tegra/dce/dce-init-deinit.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -26,6 +26,11 @@ int dce_driver_init(struct tegra_dce *d) { int ret = 0; + /** + * Set dce boot status to false + */ + dce_set_boot_complete(d, false); + ret = dce_boot_interface_init(d); if (ret) { dce_err(d, "dce boot interface init failed"); diff --git a/kernel/nvidia/drivers/platform/tegra/dce/dce-ipc.c b/kernel/nvidia/drivers/platform/tegra/dce/dce-ipc.c index fe5bc3b844..b61b04ed88 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/dce-ipc.c +++ b/kernel/nvidia/drivers/platform/tegra/dce/dce-ipc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -11,6 +11,8 @@ * more details. */ +#include + #include #include #include @@ -287,6 +289,9 @@ int dce_ipc_channel_init(struct tegra_dce *d, u32 ch_type) struct dce_ipc_region *r; struct dce_ipc_channel *ch; struct dce_ipc_queue_info *q_info; +#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE) + struct iosys_map rx, tx; +#endif if (ch_type >= DCE_IPC_CH_KMD_TYPE_MAX) { dce_err(d, "Invalid ivc channel ch_type : [%d]", ch_type); @@ -336,16 +341,24 @@ int dce_ipc_channel_init(struct tegra_dce *d, u32 ch_type) dev = dev_from_dce(d); +#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE) + iosys_map_set_vaddr_iomem(&rx, r->base + r->s_offset); + iosys_map_set_vaddr_iomem(&tx, r->base + r->s_offset + q_sz); + + ret = tegra_ivc_init(&ch->d_ivc, NULL, &rx, r->iova + r->s_offset, &tx, + r->iova + r->s_offset + q_sz, q_info->nframes, msg_sz, + dce_ipc_signal_target, NULL); +#else ret = tegra_ivc_init(&ch->d_ivc, NULL, r->base + r->s_offset, r->iova + r->s_offset, r->base + r->s_offset + q_sz, r->iova + r->s_offset + q_sz,
q_info->nframes, msg_sz, dce_ipc_signal_target, NULL); +#endif if (ret) { dce_err(d, "IVC creation failed"); goto out_lock_destroy; } - ch->flags |= DCE_IPC_CHANNEL_INITIALIZED; q_info->rx_iova = r->iova + r->s_offset; @@ -424,20 +437,19 @@ struct tegra_dce *dce_ipc_get_dce_from_ch(u32 ch_type) */ bool dce_ipc_channel_is_ready(struct tegra_dce *d, u32 ch_type) { - bool ret; + bool is_est; struct dce_ipc_channel *ch = d->d_ipc.ch[ch_type]; dce_mutex_lock(&ch->lock); - ret = (tegra_ivc_notified(&ch->d_ivc) ? false : true); + is_est = (tegra_ivc_notified(&ch->d_ivc) ? false : true); - if (ret == false) - ch->signal.notify(d, &ch->signal.to_d); + ch->signal.notify(d, &ch->signal.to_d); dce_mutex_unlock(&ch->lock); - return ret; + return is_est; } /** @@ -512,6 +524,15 @@ void dce_ipc_channel_reset(struct tegra_dce *d, u32 ch_type) */ static int _dce_ipc_get_next_write_buff(struct dce_ipc_channel *ch) { +#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE) + int err; + + err = tegra_ivc_write_get_next_frame(&ch->d_ivc, &ch->obuff); + if (err) { + iosys_map_clear(&ch->obuff); + return err; + } +#else void *frame = NULL; frame = tegra_ivc_write_get_next_frame(&ch->d_ivc); @@ -522,6 +543,7 @@ static int _dce_ipc_get_next_write_buff(struct dce_ipc_channel *ch) } ch->obuff = frame; +#endif return 0; } @@ -544,6 +566,16 @@ static int _dce_ipc_write_channel(struct dce_ipc_channel *ch, * of the IVC frame */ +#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE) + if ((ch->flags & DCE_IPC_CHANNEL_MSG_HEADER) != 0U) { + iosys_map_wr_field(&ch->obuff, 0, struct dce_ipc_header, length, + size); + iosys_map_incr(&ch->obuff, sizeof(*hdr)); + } + + if (data && size > 0) + iosys_map_memcpy_to(&ch->obuff, 0, data, size); +#else if ((ch->flags & DCE_IPC_CHANNEL_MSG_HEADER) != 0U) { hdr = (struct dce_ipc_header *)ch->obuff; hdr->length = (uint32_t)size; @@ -552,6 +584,7 @@ static int _dce_ipc_write_channel(struct dce_ipc_channel *ch, if (data && size > 0) memcpy(ch->obuff, data, size); 
+#endif return tegra_ivc_write_advance(&ch->d_ivc); } @@ -608,6 +641,15 @@ int dce_ipc_send_message(struct tegra_dce *d, u32 ch_type, */ static int _dce_ipc_get_next_read_buff(struct dce_ipc_channel *ch) { +#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE) + int err; + + err = tegra_ivc_read_get_next_frame(&ch->d_ivc, &ch->ibuff); + if (err) { + iosys_map_clear(&ch->ibuff); + return err; + } +#else void *frame = NULL; frame = tegra_ivc_read_get_next_frame(&ch->d_ivc); @@ -618,6 +660,7 @@ static int _dce_ipc_get_next_read_buff(struct dce_ipc_channel *ch) } ch->ibuff = frame; +#endif return 0; } @@ -639,6 +682,16 @@ static int _dce_ipc_read_channel(struct dce_ipc_channel *ch, * Get actual length information from the top * of the IVC frame */ +#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE) + if ((ch->flags & DCE_IPC_CHANNEL_MSG_HEADER) != 0U) { + iosys_map_wr_field(&ch->ibuff, 0, struct dce_ipc_header, length, + size); + iosys_map_incr(&ch->ibuff, sizeof(*hdr)); + } + + if (data && size > 0) + iosys_map_memcpy_from(data, &ch->ibuff, 0, size); +#else if ((ch->flags & DCE_IPC_CHANNEL_MSG_HEADER) != 0U) { hdr = (struct dce_ipc_header *)ch->ibuff; size = (size_t)(hdr->length); @@ -647,6 +700,7 @@ static int _dce_ipc_read_channel(struct dce_ipc_channel *ch, if (data && size > 0) memcpy(data, ch->ibuff, size); +#endif return tegra_ivc_read_advance(&ch->d_ivc); } @@ -791,14 +845,23 @@ int dce_ipc_get_region_iova_info(struct tegra_dce *d, u64 *iova, u32 *size) bool dce_ipc_is_data_available(struct tegra_dce *d, u32 ch_type) { bool ret = false; +#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE) + struct iosys_map map; +#else void *frame; +#endif struct dce_ipc_channel *ch = d->d_ipc.ch[ch_type]; dce_mutex_lock(&ch->lock); +#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE) + if (!tegra_ivc_read_get_next_frame(&ch->d_ivc, &map)) + ret = true; +#else frame = tegra_ivc_read_get_next_frame(&ch->d_ivc); if (!IS_ERR(frame)) ret = true; +#endif dce_mutex_unlock(&ch->lock); 
diff --git a/kernel/nvidia/drivers/platform/tegra/dce/dce-module.c b/kernel/nvidia/drivers/platform/tegra/dce/dce-module.c index adee68fb10..24d76ec3f0 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/dce-module.c +++ b/kernel/nvidia/drivers/platform/tegra/dce/dce-module.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -58,6 +58,19 @@ static inline struct tegra_dce *dce_get_pdata_dce(struct platform_device *pdev) return (&((struct dce_device *)dev_get_drvdata(&pdev->dev))->d); } +/** + * dce_get_tegra_dce_from_dev - inline function to get the tegra_dce pointer + * from device struct. + * + * @dev : Pointer to the device data structure. + * + * Return : Pointer pointing to tegra_dce data structure. + */ +static inline struct tegra_dce *dce_get_tegra_dce_from_dev(struct device *dev) +{ + return (&((struct dce_device *)dev_get_drvdata(dev))->d); +} + /** * dce_init_dev_data - Function to initialize the dce device data structure.
* @@ -255,7 +268,24 @@ static int tegra_dce_remove(struct platform_device *pdev) } #ifdef CONFIG_PM -extern const struct dev_pm_ops dce_pm_ops; +static int dce_pm_suspend(struct device *dev) +{ + struct tegra_dce *d = dce_get_tegra_dce_from_dev(dev); + + return dce_pm_enter_sc7(d); +} + +static int dce_pm_resume(struct device *dev) +{ + struct tegra_dce *d = dce_get_tegra_dce_from_dev(dev); + + return dce_pm_exit_sc7(d); +} + +const struct dev_pm_ops dce_pm_ops = { + .suspend = dce_pm_suspend, + .resume = dce_pm_resume, +}; #endif static struct platform_driver tegra_dce_driver = { diff --git a/kernel/nvidia/drivers/platform/tegra/dce/dce-pm.c b/kernel/nvidia/drivers/platform/tegra/dce/dce-pm.c index b67f384975..b53915a4ae 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/dce-pm.c +++ b/kernel/nvidia/drivers/platform/tegra/dce/dce-pm.c @@ -1,21 +1,18 @@ /* - * Copyright (C) 2022, NVIDIA Corporation. All rights reserved. + * Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
*/ #include -#ifdef CONFIG_PM - #define CCPLEX_HSP_IE 1U /* TODO : Have an api to read from platform data */ static void dce_pm_save_state(struct tegra_dce *d) @@ -30,6 +27,35 @@ static void dce_pm_restore_state(struct tegra_dce *d) dce_hsp_ie_write(d, val, CCPLEX_HSP_IE); } +/** + * dce_resume_work_fn : execute resume and bootstrap flow + * + * @d : Pointer to tegra_dce struct. + * + * Return : void + */ +void dce_resume_work_fn(struct tegra_dce *d) +{ + int ret = 0; + + if (d == NULL) { + dce_err(d, "tegra_dce struct is NULL"); + return; + } + + ret = dce_fsm_post_event(d, EVENT_ID_DCE_BOOT_COMPLETE_REQUESTED, NULL); + if (ret) { + dce_err(d, "Error while posting DCE_BOOT_COMPLETE_REQUESTED event"); + return; + } + + ret = dce_start_boot_flow(d); + if (ret) { + dce_err(d, "DCE bootstrapping failed\n"); + return; + } +} + /** * dce_handle_sc7_enter_requested_event - callback handler function for event * EVENT_ID_DCE_SC7_ENTER_REQUESTED @@ -56,6 +82,9 @@ int dce_pm_handle_sc7_enter_requested_event(struct tegra_dce *d, void *params) goto out; } + dce_set_boot_complete(d, false); + d->boot_status |= DCE_FW_SUSPENDED; + out: dce_admin_free_message(d, msg); return ret; @@ -91,19 +120,24 @@ int dce_pm_handle_sc7_exit_received_event(struct tegra_dce *d, void *params) return 0; } -static int dce_pm_suspend(struct device *dev) +int dce_pm_enter_sc7(struct tegra_dce *d) { int ret = 0; - struct tegra_dce *d; - struct dce_device *d_dev = NULL; struct dce_ipc_message *msg = NULL; - d_dev = dev_get_drvdata(dev); - d = &d_dev->d; + /* + * If Bootstrap is not yet done. Nothing to do during SC7 Enter + * Return success immediately. 
+ */ + if (!dce_is_bootstrap_done(d)) { + dce_debug(d, "Bootstrap not done, Succeed SC7 enter\n"); + goto out; + } msg = dce_admin_allocate_message(d); if (!msg) { dce_err(d, "IPC msg allocation failed"); + ret = -1; goto out; } @@ -112,12 +146,14 @@ static int dce_pm_suspend(struct device *dev) ret = dce_admin_send_prepare_sc7(d, msg); if (ret) { dce_err(d, "Prepare SC7 failed [%d]", ret); + ret = -1; goto out; } ret = dce_fsm_post_event(d, EVENT_ID_DCE_SC7_ENTER_REQUESTED, NULL); if (ret) { dce_err(d, "Error while posting SC7_ENTER event [%d]", ret); + ret = -1; goto out; } @@ -126,14 +162,9 @@ static int dce_pm_suspend(struct device *dev) return ret; } -static int dce_pm_resume(struct device *dev) +int dce_pm_exit_sc7(struct tegra_dce *d) { int ret = 0; - struct tegra_dce *d; - struct dce_device *d_dev = NULL; - - d_dev = dev_get_drvdata(dev); - d = &d_dev->d; dce_pm_restore_state(d); @@ -145,27 +176,3 @@ static int dce_pm_resume(struct device *dev) out: return ret; } - -const struct dev_pm_ops dce_pm_ops = { - .suspend = dce_pm_suspend, - .resume = dce_pm_resume, -}; - -#else - -int dce_pm_handle_sc7_enter_requested_event(struct tegra_dce *d, void *params) -{ - return 0; -} - -int dce_pm_handle_sc7_enter_received_event(struct tegra_dce *d, void *params) -{ - return 0; -} - -int dce_pm_handle_sc7_exit_received_event(struct tegra_dce *d, void *params) -{ - return 0; -} - -#endif diff --git a/kernel/nvidia/drivers/platform/tegra/dce/dce-util-common.c b/kernel/nvidia/drivers/platform/tegra/dce/dce-util-common.c index 0bcc4b3b51..e2980b0ea6 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/dce-util-common.c +++ b/kernel/nvidia/drivers/platform/tegra/dce/dce-util-common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -610,7 +610,7 @@ unsigned long dce_get_nxt_pow_of_2(unsigned long *addr, u8 nbits) } /* - * dce_schedule_work : schedule work in global workqueue + * dce_schedule_work : schedule work in global highpri workqueue * * @work : dce work to be scheduled * @@ -618,7 +618,7 @@ unsigned long dce_get_nxt_pow_of_2(unsigned long *addr, u8 nbits) */ void dce_schedule_work(struct dce_work_struct *work) { - schedule_work(&work->work); + queue_work(system_highpri_wq, &work->work); } /* diff --git a/kernel/nvidia/drivers/platform/tegra/dce/dce-worker.c b/kernel/nvidia/drivers/platform/tegra/dce/dce-worker.c index f6271452e5..43cbff8a61 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/dce-worker.c +++ b/kernel/nvidia/drivers/platform/tegra/dce/dce-worker.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -36,14 +36,23 @@ int dce_wait_interruptible(struct tegra_dce *d, u32 msg_id) } wait = &d->ipc_waits[msg_id]; - atomic_set(&wait->complete, 0); + /* + * It is possible that we received the ACK from DCE even before we + * start waiting. But that should not be an issue as wait->complete + * Will be "1" and we immediately exit from the wait. + */ DCE_COND_WAIT_INTERRUPTIBLE(&wait->cond_wait, atomic_read(&wait->complete) == 1); if (atomic_read(&wait->complete) != 1) return -EINTR; + /* + * Clear wait->complete as soon as we exit from wait (consume the wake call) + * So that when the next dce_wait_interruptible is called, it doesn't see old + * wait->complete state. 
+ */ atomic_set(&wait->complete, 0); return 0; } @@ -67,39 +76,15 @@ void dce_wakeup_interruptible(struct tegra_dce *d, u32 msg_id) wait = &d->ipc_waits[msg_id]; + /* + * Set wait->complete to "1", so if the wait is called even after + * "dce_cond_signal_interruptible", it'll see the complete variable + * as "1" and exit the wait immediately. + */ atomic_set(&wait->complete, 1); dce_cond_signal_interruptible(&wait->cond_wait); } -/** - * dce_resume_work_fn : execute resume and bootstrap flow - * - * @d : Pointer to tegra_dce struct. - * - * Return : void - */ -void dce_resume_work_fn(struct tegra_dce *d) -{ - int ret = 0; - - if (d == NULL) { - dce_err(d, "tegra_dce struct is NULL"); - return; - } - - ret = dce_fsm_post_event(d, EVENT_ID_DCE_BOOT_COMPLETE_REQUESTED, NULL); - if (ret) { - dce_err(d, "Error while posting DCE_BOOT_COMPLETE_REQUESTED event"); - return; - } - - ret = dce_start_boot_flow(d); - if (ret) { - dce_err(d, "DCE bootstrapping failed\n"); - return; - } -} - /** * dce_work_cond_sw_resource_init : Init dce workqueues related resources * @@ -114,7 +99,7 @@ int dce_work_cond_sw_resource_init(struct tegra_dce *d) ret = dce_init_work(d, &d->dce_fsm_bootstrap_work, dce_bootstrap_work_fn); if (ret) { - dce_err(d, "fsm_start work init failed"); + dce_err(d, "Bootstrap work init failed"); goto exit; } @@ -124,6 +109,12 @@ int dce_work_cond_sw_resource_init(struct tegra_dce *d) goto exit; } + if (dce_cond_init(&d->dce_bootstrap_done)) { + dce_err(d, "dce boot wait condition init failed"); + ret = -1; + goto exit; + } + for (i = 0; i < DCE_MAX_WAIT; i++) { struct dce_wait_cond *wait = &d->ipc_waits[i]; @@ -144,6 +135,7 @@ int dce_work_cond_sw_resource_init(struct tegra_dce *d) dce_cond_destroy(&wait->cond_wait); i--; } + dce_cond_destroy(&d->dce_bootstrap_done); exit: return ret; } @@ -165,4 +157,6 @@ void dce_work_cond_sw_resource_deinit(struct tegra_dce *d) dce_cond_destroy(&wait->cond_wait); atomic_set(&wait->complete, 0); } + + 
dce_cond_destroy(&d->dce_bootstrap_done); } diff --git a/kernel/nvidia/drivers/platform/tegra/dce/include/dce-fsm.h b/kernel/nvidia/drivers/platform/tegra/dce/include/dce-fsm.h index 7c5d4ee04b..0621116ec6 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/include/dce-fsm.h +++ b/kernel/nvidia/drivers/platform/tegra/dce/include/dce-fsm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/kernel/nvidia/drivers/platform/tegra/dce/include/dce-ipc.h b/kernel/nvidia/drivers/platform/tegra/dce/include/dce-ipc.h index 50cb45346a..1baa61c05b 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/include/dce-ipc.h +++ b/kernel/nvidia/drivers/platform/tegra/dce/include/dce-ipc.h @@ -14,6 +14,7 @@ #ifndef DCE_IPC_H #define DCE_IPC_H +#include #include #include #include @@ -128,8 +129,13 @@ struct dce_ipc_channel { u32 w_type; u32 ch_type; u32 ipc_type; +#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE) + struct iosys_map ibuff; + struct iosys_map obuff; +#else void *ibuff; void *obuff; +#endif struct tegra_ivc d_ivc; struct tegra_dce *d; struct dce_mutex lock; diff --git a/kernel/nvidia/drivers/platform/tegra/dce/include/dce-pm.h b/kernel/nvidia/drivers/platform/tegra/dce/include/dce-pm.h index e154131b7d..6cb4b3b184 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/include/dce-pm.h +++ b/kernel/nvidia/drivers/platform/tegra/dce/include/dce-pm.h @@ -1,15 +1,14 @@ /* - * Copyright (C) 2022, NVIDIA Corporation. All rights reserved. + * Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. */ #ifndef DCE_PM_H @@ -21,6 +20,9 @@ struct dce_sc7_state { uint32_t hsp_ie; }; +int dce_pm_enter_sc7(struct tegra_dce *d); +int dce_pm_exit_sc7(struct tegra_dce *d); +void dce_resume_work_fn(struct tegra_dce *d); int dce_pm_handle_sc7_enter_requested_event(struct tegra_dce *d, void *params); int dce_pm_handle_sc7_enter_received_event(struct tegra_dce *d, void *params); int dce_pm_handle_sc7_exit_received_event(struct tegra_dce *d, void *params); diff --git a/kernel/nvidia/drivers/platform/tegra/dce/include/dce.h b/kernel/nvidia/drivers/platform/tegra/dce/include/dce.h index f95c42a747..b4604d9874 100644 --- a/kernel/nvidia/drivers/platform/tegra/dce/include/dce.h +++ b/kernel/nvidia/drivers/platform/tegra/dce/include/dce.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -56,6 +56,7 @@ #define DCE_FW_ADMIN_SEQ_START DCE_BIT(10) #define DCE_FW_ADMIN_SEQ_FAILED DCE_BIT(9) #define DCE_FW_ADMIN_SEQ_DONE DCE_BIT(8) +#define DCE_FW_SUSPENDED DCE_BIT(2) #define DCE_FW_BOOT_DONE DCE_BIT(1) #define DCE_STATUS_FAILED DCE_BIT(0) #define DCE_STATUS_UNKNOWN ((u32)(0)) @@ -161,6 +162,10 @@ struct tegra_dce { * dce_wait_info - Data structure to manage wait for different event types */ struct dce_wait_cond ipc_waits[DCE_MAX_WAIT]; + /** + * dce_bootstrap_done - Data structure to manage wait for boot done + */ + struct dce_cond dce_bootstrap_done; /** * @d_mb - Stores the current status of dce mailbox interfaces. */ @@ -297,6 +302,8 @@ static inline struct dce_platform_data *pdata_from_dce(struct tegra_dce *d) static inline void dce_set_boot_complete(struct tegra_dce *d, bool val) { d->boot_complete = val; + if (!val) + d->boot_status &= (~DCE_FW_BOOT_DONE); } /** diff --git a/kernel/nvidia/drivers/platform/tegra/mc/mcerr-t19x.c b/kernel/nvidia/drivers/platform/tegra/mc/mcerr-t19x.c index 5d3ba95c9e..17640d6348 100644 --- a/kernel/nvidia/drivers/platform/tegra/mc/mcerr-t19x.c +++ b/kernel/nvidia/drivers/platform/tegra/mc/mcerr-t19x.c @@ -1,7 +1,7 @@ /* * Tegra 19x SoC-specific mcerr code. * - * Copyright (c) 2017-2022, NVIDIA Corporation. All rights reserved. + * Copyright (c) 2017-2023, NVIDIA Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -290,8 +290,14 @@ static struct mc_client mc_clients[] = { static int mc_client_last = ARRAY_SIZE(mc_clients) - 1; /*** Done. 
***/ +static u32 mc_channel = MC_BROADCAST_CHANNEL; static u32 global_intstatus; static u32 global_intstatus_1; +static u32 slice_int_status; +static u32 ch_int_status; +static u32 hubc_int_status; +static u32 sbs_int_status; +static u32 hub_int_status; static const char *intr_info[] = { NULL, /* Bit 0 */ @@ -482,12 +488,50 @@ static const struct mc_error sbs_mc_errors[] = { static void set_intstatus(unsigned int irq) { + mc_channel = 0; + global_intstatus = 0; + global_intstatus_1 = 0; + slice_int_status = 0; + ch_int_status = 0; + hubc_int_status = 0; + sbs_int_status = 0; + hub_int_status = 0; } static void save_intstatus(unsigned int irq) { global_intstatus = mc_readl(MC_GLOBAL_INTSTATUS); global_intstatus_1 = mc_readl(MC_GLOBAL_INTSTATUS_1); + + /* + * If multiple interrupts come in just handle the first one we see. The + * HW only keeps track of 1 interrupt's data and we don't know which + * particular fault is actually being kept... + */ + + if (global_intstatus & GIS_CH_MASK) { + mc_channel = __ffs(global_intstatus & GIS_CH_MASK); + } else if (global_intstatus & GIS_SLICE_MASK) { + mc_channel = __ffs((global_intstatus & GIS_SLICE_MASK) >> GIS_SLICE0); + } else if (global_intstatus & GIS_HUB_MASK) { + mc_channel = __ffs((global_intstatus & GIS_HUB_MASK) >> GIS_HUB0); + } else if (global_intstatus & GIS_NVLINK_MASK) { + mc_channel = __ffs((global_intstatus & GIS_NVLINK_MASK) >> GIS_nvlink0); + } else if (global_intstatus & BIT(GIS_HUBC)) { + mc_channel = MC_BROADCAST_CHANNEL; + } else if (global_intstatus & BIT(GIS_SBS)) { + mc_channel = MC_BROADCAST_CHANNEL; + } else if (global_intstatus_1 & GIS_1_CH_MASK) { + mc_channel = 8 + __ffs(global_intstatus_1 & GIS_1_CH_MASK); + } else { + mcerr_pr("mcerr: unknown intr source intstatus = 0x%08x, " + "intstatus_1 = 0x%08x\n", global_intstatus, global_intstatus_1); + } + slice_int_status = __mc_readl(mc_channel, MC_INTSTATUS); + ch_int_status = __mc_readl(mc_channel, MC_CH_INTSTATUS); + hubc_int_status = 
__mc_readl(mc_channel, MC_HUBC_INTSTATUS); + sbs_int_status = __mc_readl(mc_channel, MC_MSS_SBS_INTSTATUS); + hub_int_status = __mc_readl(mc_channel, MC_HUB_INTSTATUS); } static void clear_intstatus(unsigned int irq) @@ -580,43 +624,6 @@ static void log_mcerr_fault(unsigned int irq) { int faults_handled = 0; const struct mc_error *err; - int mc_channel = MC_BROADCAST_CHANNEL; - u32 slice_int_status, ch_int_status, hubc_int_status; - u32 sbs_int_status, hub_int_status; - u32 g_intstatus = global_intstatus; - u32 g_intstatus_1 = global_intstatus_1; - - /* - * If multiple interrupts come in just handle the first one we see. The - * HW only keeps track of 1 interrupt's data and we don't know which - * particular fault is actually being kept... - */ - - if (g_intstatus & GIS_CH_MASK) { - mc_channel = __ffs(g_intstatus & GIS_CH_MASK); - } else if (g_intstatus & GIS_SLICE_MASK){ - mc_channel = __ffs((g_intstatus & GIS_SLICE_MASK) >> GIS_SLICE0); - } else if (g_intstatus & GIS_HUB_MASK) { - mc_channel = __ffs((g_intstatus & GIS_HUB_MASK) >> GIS_HUB0); - } else if (g_intstatus & GIS_NVLINK_MASK) { - mc_channel = __ffs((g_intstatus & GIS_NVLINK_MASK) >> GIS_nvlink0); - } else if (g_intstatus & BIT(GIS_HUBC)) { - mc_channel = MC_BROADCAST_CHANNEL; - } else if (g_intstatus & BIT(GIS_SBS)) { - mc_channel = MC_BROADCAST_CHANNEL; - } else if (g_intstatus_1 & GIS_1_CH_MASK) { - mc_channel = 8 + __ffs(g_intstatus_1 & GIS_1_CH_MASK); - } else { - mcerr_pr("mcerr: unknown intr source intstatus = 0x%08x, " - "intstatus_1 = 0x%08x\n", g_intstatus, g_intstatus_1); - return; - } - - slice_int_status = __mc_readl(mc_channel, MC_INTSTATUS); - ch_int_status = __mc_readl(mc_channel, MC_CH_INTSTATUS); - hubc_int_status = __mc_readl(mc_channel, MC_HUBC_INTSTATUS); - sbs_int_status = __mc_readl(mc_channel, MC_MSS_SBS_INTSTATUS); - hub_int_status = __mc_readl(mc_channel, MC_HUB_INTSTATUS); LOG_FAULT(slice, mc_int_mask, _); LOG_FAULT(hub, U32_MAX, _HUB_); @@ -625,8 +632,8 @@ static void 
log_mcerr_fault(unsigned int irq) LOG_FAULT(sbs, U32_MAX, _MSS_SBS_); if (faults_handled) { - mc_writel(g_intstatus, MC_GLOBAL_INTSTATUS); - mc_writel(g_intstatus_1, MC_GLOBAL_INTSTATUS_1); + mc_writel(global_intstatus, MC_GLOBAL_INTSTATUS); + mc_writel(global_intstatus_1, MC_GLOBAL_INTSTATUS_1); } else { pr_err("unknown mcerr fault, int_status=0x%08x, " "ch_int_status=0x%08x, hubc_int_status=0x%08x " diff --git a/kernel/nvidia/drivers/platform/tegra/mc/mcerr-t23x.c b/kernel/nvidia/drivers/platform/tegra/mc/mcerr-t23x.c index a5b960e608..4383b95c56 100644 --- a/kernel/nvidia/drivers/platform/tegra/mc/mcerr-t23x.c +++ b/kernel/nvidia/drivers/platform/tegra/mc/mcerr-t23x.c @@ -2,7 +2,7 @@ /* * - * Copyright (c) 2019-2022, NVIDIA Corporation. All rights reserved. + * Copyright (c) 2019-2023, NVIDIA Corporation. All rights reserved. * * Tegra 23x mcerr driver. */ @@ -336,8 +336,14 @@ static struct mc_client mc_clients[] = { static int mc_client_last = ARRAY_SIZE(mc_clients) - 1; /*** Done. ***/ +static u32 mc_channel = MC_BROADCAST_CHANNEL; static u32 global_intstatus; static u32 global_intstatus_1; +static u32 slice_int_status; +static u32 ch_int_status; +static u32 hubc_int_status; +static u32 sbs_int_status; +static u32 hub_int_status; /* MC_INTSTATUS_0 bits */ static const char *intr_info[] = { @@ -561,6 +567,14 @@ static const struct mc_error sbs_mc_errors[] = { static void set_intstatus(unsigned int irq) { + mc_channel = 0; + global_intstatus = 0; + global_intstatus_1 = 0; + slice_int_status = 0; + ch_int_status = 0; + hubc_int_status = 0; + sbs_int_status = 0; + hub_int_status = 0; } static void clear_intstatus(unsigned int irq) @@ -579,6 +593,43 @@ static void save_intstatus(unsigned int irq) { global_intstatus = mc_readl(MC_GLOBAL_INTSTATUS); global_intstatus_1 = mc_readl(MC_GLOBAL_INTSTATUS_1); + + /* + * If multiple interrupts come in just handle the first one we see. 
The + * HW only keeps track of 1 interrupt's data and we don't know which + * particular fault is actually being kept... + */ + + if (global_intstatus & GIS_CH_MASK) { + mc_channel = __ffs(global_intstatus & GIS_CH_MASK); + } else if (global_intstatus & GIS_SLICE_MASK) { + mc_channel = __ffs((global_intstatus & GIS_SLICE_MASK) + >> GIS_SLICE0); + } else if (global_intstatus & GIS_HUBG1_MASK) { + mc_channel = __ffs((global_intstatus & GIS_HUBG1_MASK) + >> GIS_HUB0); + } else if (global_intstatus & GIS_NVLINK_MASK) { + mc_channel = __ffs((global_intstatus & GIS_NVLINK_MASK) + >> GIS_nvlink0); + } else if (global_intstatus & GIS_HUBG2_MASK) { + mc_channel = __ffs((global_intstatus & GIS_HUBG2_MASK) + >> GIS_HUB4); + } else if (global_intstatus & BIT(GIS_HUBC)) { + mc_channel = MC_BROADCAST_CHANNEL; + } else if (global_intstatus & BIT(GIS_SBS)) { + mc_channel = MC_BROADCAST_CHANNEL; + } else if (global_intstatus_1 & GIS_1_CH_MASK) { + mc_channel = 8 + __ffs(global_intstatus_1 & GIS_1_CH_MASK); + } else { + mcerr_pr("mcerr: unknown intr source intstatus = 0x%08x, " + "intstatus_1 = 0x%08x\n", global_intstatus, global_intstatus_1); + } + + slice_int_status = __mc_readl(mc_channel, MC_INTSTATUS); + ch_int_status = __mc_readl(mc_channel, MC_CH_INTSTATUS); + hubc_int_status = __mc_readl(mc_channel, MC_HUBC_INTSTATUS); + sbs_int_status = __mc_readl(mc_channel, MC_MSS_SBS_INTSTATUS); + hub_int_status = __mc_readl(mc_channel, MC_HUB_INTSTATUS); } static void log_fault(int src_chan, const struct mc_error *fault) @@ -677,49 +728,6 @@ static void log_mcerr_fault(unsigned int irq) { int faults_handled = 0; const struct mc_error *err; - int mc_channel = MC_BROADCAST_CHANNEL; - u32 slice_int_status, ch_int_status, hubc_int_status; - u32 sbs_int_status, hub_int_status; - u32 g_intstatus = global_intstatus; - u32 g_intstatus_1 = global_intstatus_1; - - /* - * If multiple interrupts come in just handle the first one we see. 
The - * HW only keeps track of 1 interrupt's data and we don't know which - * particular fault is actually being kept... - */ - - if (g_intstatus & GIS_CH_MASK) { - mc_channel = __ffs(g_intstatus & GIS_CH_MASK); - } else if (g_intstatus & GIS_SLICE_MASK) { - mc_channel = __ffs((g_intstatus & GIS_SLICE_MASK) - >> GIS_SLICE0); - } else if (g_intstatus & GIS_HUBG1_MASK) { - mc_channel = __ffs((g_intstatus & GIS_HUBG1_MASK) - >> GIS_HUB0); - } else if (g_intstatus & GIS_NVLINK_MASK) { - mc_channel = __ffs((g_intstatus & GIS_NVLINK_MASK) - >> GIS_nvlink0); - } else if (g_intstatus & GIS_HUBG2_MASK) { - mc_channel = __ffs((g_intstatus & GIS_HUBG2_MASK) - >> GIS_HUB4); - } else if (g_intstatus & BIT(GIS_HUBC)) { - mc_channel = MC_BROADCAST_CHANNEL; - } else if (g_intstatus & BIT(GIS_SBS)) { - mc_channel = MC_BROADCAST_CHANNEL; - } else if (g_intstatus_1 & GIS_1_CH_MASK) { - mc_channel = 8 + __ffs(g_intstatus_1 & GIS_1_CH_MASK); - } else { - mcerr_pr("mcerr: unknown intr source intstatus = 0x%08x, " - "intstatus_1 = 0x%08x\n", g_intstatus, g_intstatus_1); - return; - } - - slice_int_status = __mc_readl(mc_channel, MC_INTSTATUS); - ch_int_status = __mc_readl(mc_channel, MC_CH_INTSTATUS); - hubc_int_status = __mc_readl(mc_channel, MC_HUBC_INTSTATUS); - sbs_int_status = __mc_readl(mc_channel, MC_MSS_SBS_INTSTATUS); - hub_int_status = __mc_readl(mc_channel, MC_HUB_INTSTATUS); LOG_FAULT(slice, mc_int_mask, _); LOG_FAULT(hub, U32_MAX, _HUB_); @@ -728,8 +736,8 @@ static void log_mcerr_fault(unsigned int irq) LOG_FAULT(sbs, U32_MAX, _MSS_SBS_); if (faults_handled) { - mc_writel(g_intstatus, MC_GLOBAL_INTSTATUS); - mc_writel(g_intstatus_1, MC_GLOBAL_INTSTATUS_1); + mc_writel(global_intstatus, MC_GLOBAL_INTSTATUS); + mc_writel(global_intstatus_1, MC_GLOBAL_INTSTATUS_1); } else { pr_err("unknown mcerr fault, int_status=0x%08x, " "ch_int_status=0x%08x, hubc_int_status=0x%08x " diff --git a/kernel/nvidia/drivers/platform/tegra/rtcpu/debug.c 
b/kernel/nvidia/drivers/platform/tegra/rtcpu/debug.c index 219a7b7965..aefc9fbae6 100644 --- a/kernel/nvidia/drivers/platform/tegra/rtcpu/debug.c +++ b/kernel/nvidia/drivers/platform/tegra/rtcpu/debug.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -869,6 +869,10 @@ static void camrtc_run_rmem_unmap_all(struct camrtc_debug *crd, } } +#ifndef INT_MAX +#define INT_MAX ((int)(~0U >> 1)) +#endif + static int camrtc_run_mem_map(struct tegra_ivc_channel *ch, struct device *mem_dev, struct device *dev, @@ -890,6 +894,8 @@ static int camrtc_run_mem_map(struct tegra_ivc_channel *ch, if (mem_dev == dev) { *return_iova = mem->iova; + dma_sync_single_for_device(dev, mem->iova, mem->size, + DMA_BIDIRECTIONAL); goto done; } @@ -901,6 +907,8 @@ static int camrtc_run_mem_map(struct tegra_ivc_channel *ch, *return_iova = 0ULL; return -ENOMEM; } + dma_sync_single_for_device(dev, mem->iova, mem->size, + DMA_BIDIRECTIONAL); } else { ret = dma_get_sgtable(dev, sgt, mem->ptr, mem->iova, mem->size); if (ret < 0) { @@ -918,6 +926,10 @@ static int camrtc_run_mem_map(struct tegra_ivc_channel *ch, } *return_iova = sgt->sgl->dma_address; + if (sgt->nents <= INT_MAX) + dma_sync_sg_for_device(dev, sgt->sgl, (int)sgt->nents, DMA_BIDIRECTIONAL); + else + ret = -EINVAL; } done: @@ -1118,8 +1130,6 @@ static int camrtc_run_mem_test(struct seq_file *file, if (ret < 0) goto unmap; - dma_sync_single_for_device(mem_dev, mem->iova, mem->used, - DMA_BIDIRECTIONAL); } BUILD_BUG_ON_MISMATCH( @@ -1142,8 +1152,12 @@ static int camrtc_run_mem_test(struct seq_file *file, if (!WARN_ON(testmem->size > mem->size)) mem->used = testmem->size; - dma_sync_single_for_cpu(mem_dev, mem->iova, mem->used, - DMA_BIDIRECTIONAL); + if (_camdbg_rmem.enabled) + 
dma_sync_single_for_cpu(mem_dev, mem->iova, mem->used, + DMA_BIDIRECTIONAL); + else + dma_sync_sg_for_cpu(mem_dev, vi_sgt[i].sgl, + vi_sgt[i].nents, DMA_BIDIRECTIONAL); } unmap: diff --git a/kernel/nvidia/drivers/platform/tegra/tegra-epl.c b/kernel/nvidia/drivers/platform/tegra/tegra-epl.c index 3bd61bad01..f395d8d22b 100644 --- a/kernel/nvidia/drivers/platform/tegra/tegra-epl.c +++ b/kernel/nvidia/drivers/platform/tegra/tegra-epl.c @@ -1,28 +1,6 @@ -/* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -/** - * @file tegra-epl.c - * @brief epl driver to communicate with eplcom daemon - * - * This file will register as client driver so that EPL client can - * report SW error to FSI using HSP mailbox from user space - */ - -/* ==================[Includes]============================================= */ - -#include +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ #include #include #include @@ -32,56 +10,36 @@ #include #include #include -#include -#include "linux/tegra-epl.h" -#include "uapi/linux/tegra-epl.h" +#include #include +#include +#include -/*Timeout in millisec*/ -#define TIMEOUT 1000 +/* Timeout in milliseconds */ +#define TIMEOUT 5U -/*32bit data Length*/ +/* 32bit data Length */ #define MAX_LEN 4 -/* Macro indicating misc register width */ -#define MISC_REG_WIDTH 4U - -/* Macro indiciating total number of Misc Sw generic errors in Misc EC */ +/* Macro indicating total number of Misc Sw generic errors in Misc EC */ #define NUM_SW_GENERIC_ERR 5U -/* Macro for Misc register access, because of guardword check could not - * include the hw headers here. - */ - -/* Macro for Misc EC mission error status register address */ -#define MISC_EC_ERRSLICE0_MISSIONERR_STATUS_0 0x024e0038U - -/* Macro for Misc registers */ - -#define MISCREG_MISC_EC_ERR0_SW_ERR_CODE_0 0x00110000U -#define MISCREG_MISC_EC_ERR0_SW_ERR_ASSERT_0 0x00110004U - -#define MISCREG_MISC_EC_ERR1_SW_ERR_CODE_0 0x00120000U -#define MISCREG_MISC_EC_ERR1_SW_ERR_ASSERT_0 0x00120004U - -#define MISCREG_MISC_EC_ERR2_SW_ERR_CODE_0 0x00130000U -#define MISCREG_MISC_EC_ERR2_SW_ERR_ASSERT_0 0x00130004U - -#define MISCREG_MISC_EC_ERR3_SW_ERR_CODE_0 0x00140000U -#define MISCREG_MISC_EC_ERR3_SW_ERR_ASSERT_0 0x00140004U +/* Error index offset in mission status register */ +#define ERROR_INDEX_OFFSET 24U -#define MISCREG_MISC_EC_ERR4_SW_ERR_CODE_0 0x00150000U -#define MISCREG_MISC_EC_ERR4_SW_ERR_ASSERT_0 0x00150004U - -/* =================[Data types]======================================== */ +enum handshake_state { + HANDSHAKE_PENDING, + HANDSHAKE_FAILED, + HANDSHAKE_DONE +}; -/*Data type for mailbox client and channel details*/ +/* Data type for mailbox client and channel details */ struct epl_hsp_sm { struct mbox_client client; struct mbox_chan *chan; }; -/*Data type for accessing TOP2 HSP */ +/* Data type for accessing TOP2 HSP */ struct epl_hsp { struct 
epl_hsp_sm tx; struct device dev; @@ -89,18 +47,11 @@ struct epl_hsp { /* Data type to store Misc Sw Generic error configuration */ struct epl_misc_sw_err_cfg { - uint32_t err_code_phyaddr; - uint32_t err_assert_phy_addr; void __iomem *err_code_va; void __iomem *err_assert_va; const char *dev_configured; - uint8_t ec_err_idx; }; -/* =================[GLOBAL variables]================================== */ -static ssize_t device_file_ioctl( - struct file *, unsigned int cmd, unsigned long arg); - static int device_file_major_number; static const char device_name[] = "epdaemon"; @@ -112,71 +63,13 @@ static void __iomem *mission_err_status_va; static bool isAddrMappOk = true; -static struct epl_misc_sw_err_cfg miscerr_cfg[NUM_SW_GENERIC_ERR] = { - { - .err_code_phyaddr = MISCREG_MISC_EC_ERR0_SW_ERR_CODE_0, - .err_assert_phy_addr = MISCREG_MISC_EC_ERR0_SW_ERR_ASSERT_0, - .ec_err_idx = 24U, - }, - { - .err_code_phyaddr = MISCREG_MISC_EC_ERR1_SW_ERR_CODE_0, - .err_assert_phy_addr = MISCREG_MISC_EC_ERR1_SW_ERR_ASSERT_0, - .ec_err_idx = 25U, - }, - { - .err_code_phyaddr = MISCREG_MISC_EC_ERR2_SW_ERR_CODE_0, - .err_assert_phy_addr = MISCREG_MISC_EC_ERR2_SW_ERR_ASSERT_0, - .ec_err_idx = 26U, - }, - { - .err_code_phyaddr = MISCREG_MISC_EC_ERR3_SW_ERR_CODE_0, - .err_assert_phy_addr = MISCREG_MISC_EC_ERR3_SW_ERR_ASSERT_0, - .ec_err_idx = 27U, - }, - { - .err_code_phyaddr = MISCREG_MISC_EC_ERR4_SW_ERR_CODE_0, - .err_assert_phy_addr = MISCREG_MISC_EC_ERR4_SW_ERR_ASSERT_0, - .ec_err_idx = 28U, - } -}; +static struct epl_misc_sw_err_cfg miscerr_cfg[NUM_SW_GENERIC_ERR]; -/*File operations*/ -const static struct file_operations epl_driver_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = device_file_ioctl, -}; +/* State of FSI handshake */ +static enum handshake_state hs_state = HANDSHAKE_PENDING; +static DEFINE_MUTEX(hs_state_mutex); -static int epl_register_device(void) -{ - int result = 0; - struct class *dev_class; - - result = register_chrdev(0, device_name, &epl_driver_fops); 
- if (result < 0) { - pr_err("%s> register_chrdev code = %i\n", device_name, result); - return result; - } - device_file_major_number = result; - dev_class = class_create(THIS_MODULE, device_name); - if (dev_class == NULL) { - pr_err("%s> Could not create class for device\n", device_name); - goto class_fail; - } - - if ((device_create(dev_class, NULL, - MKDEV(device_file_major_number, 0), - NULL, device_name)) == NULL) { - pr_err("%s> Could not create device node\n", device_name); - goto device_failure; - } - return 0; - -device_failure: - class_destroy(dev_class); -class_fail: - unregister_chrdev(device_file_major_number, device_name); - return -1; -} +static struct task_struct *fsi_handshake_thread; static void tegra_hsp_tx_empty_notify(struct mbox_client *cl, void *data, int empty_value) @@ -202,36 +95,33 @@ static int tegra_hsp_mb_init(struct device *dev) if (IS_ERR(epl_hsp_v->tx.chan)) { err = PTR_ERR(epl_hsp_v->tx.chan); dev_err(dev, "failed to get tx mailbox: %d\n", err); - devm_kfree(dev, epl_hsp_v); return err; } return 0; } -static void epl_unregister_device(void) -{ - if (device_file_major_number != 0) - unregister_chrdev(device_file_major_number, device_name); -} - static ssize_t device_file_ioctl( struct file *fp, unsigned int cmd, unsigned long arg) { uint32_t lData[MAX_LEN]; int ret; - if (copy_from_user(lData, (uint8_t *)arg, + if (copy_from_user(lData, (void __user *)arg, MAX_LEN * sizeof(uint32_t))) return -EACCES; switch (cmd) { case EPL_REPORT_ERROR_CMD: - ret = mbox_send_message(epl_hsp_v->tx.chan, - (void *) lData); - break; + mutex_lock(&hs_state_mutex); + if (hs_state == HANDSHAKE_DONE) + ret = mbox_send_message(epl_hsp_v->tx.chan, (void *) lData); + else + ret = -ENODEV; + mutex_unlock(&hs_state_mutex); + break; default: return -EINVAL; } @@ -256,7 +146,7 @@ int epl_get_misc_ec_err_status(struct device *dev, uint8_t err_number, bool *sta if (strcmp(dev_str, miscerr_cfg[err_number].dev_configured) != 0) return -EACCES; - mask = (1U << 
(miscerr_cfg[err_number].ec_err_idx % 32U)); + mask = (1U << ((ERROR_INDEX_OFFSET + err_number) % 32U)); mission_err_status = readl(mission_err_status_va); if ((mission_err_status & mask) != 0U) @@ -269,7 +159,7 @@ int epl_get_misc_ec_err_status(struct device *dev, uint8_t err_number, bool *sta return ret; } -EXPORT_SYMBOL(epl_get_misc_ec_err_status); +EXPORT_SYMBOL_GPL(epl_get_misc_ec_err_status); int epl_report_misc_ec_error(struct device *dev, uint8_t err_number, uint32_t sw_error_code) @@ -293,30 +183,79 @@ int epl_report_misc_ec_error(struct device *dev, uint8_t err_number, return 0; } -EXPORT_SYMBOL(epl_report_misc_ec_error); +EXPORT_SYMBOL_GPL(epl_report_misc_ec_error); int epl_report_error(struct epl_error_report_frame error_report) { int ret = -EINVAL; - if (epl_hsp_v == NULL) + mutex_lock(&hs_state_mutex); + if (epl_hsp_v == NULL || hs_state != HANDSHAKE_DONE) { + mutex_unlock(&hs_state_mutex); return -ENODEV; + } + mutex_unlock(&hs_state_mutex); ret = mbox_send_message(epl_hsp_v->tx.chan, (void *)&error_report); return ret < 0 ? 
ret : 0; } -EXPORT_SYMBOL(epl_report_error); +EXPORT_SYMBOL_GPL(epl_report_error); + +static int epl_client_fsi_handshake(void *arg) +{ + mutex_lock(&hs_state_mutex); + + if (epl_hsp_v) { + int ret; + const uint32_t handshake_data[] = {0x45504C48, 0x414E4453, 0x48414B45, + 0x44415441}; + const uint8_t max_retries = 3; + uint8_t count = 0; + + do { + ret = mbox_send_message(epl_hsp_v->tx.chan, (void *) handshake_data); + + if (ret < 0) { + hs_state = HANDSHAKE_FAILED; + count++; + } else { + hs_state = HANDSHAKE_DONE; + break; + } + } while (count < max_retries || kthread_should_stop()); + } + + if (hs_state == HANDSHAKE_FAILED) + pr_warn("epl_client: handshake with FSI failed\n"); + else + pr_info("epl_client: handshake done with FSI\n"); + + mutex_unlock(&hs_state_mutex); + + return 0; +} static int __maybe_unused epl_client_suspend(struct device *dev) { pr_debug("tegra-epl: suspend called\n"); + + mutex_lock(&hs_state_mutex); + hs_state = HANDSHAKE_PENDING; + mutex_unlock(&hs_state_mutex); + return 0; } static int __maybe_unused epl_client_resume(struct device *dev) { pr_debug("tegra-epl: resume called\n"); + + fsi_handshake_thread = kthread_run(epl_client_fsi_handshake, NULL, "fsi-hs"); + + if (IS_ERR(fsi_handshake_thread)) + return PTR_ERR(fsi_handshake_thread); + return 0; } static SIMPLE_DEV_PM_OPS(epl_client_pm, epl_client_suspend, epl_client_resume); @@ -328,6 +267,50 @@ static const struct of_device_id epl_client_dt_match[] = { MODULE_DEVICE_TABLE(of, epl_client_dt_match); +/* File operations */ +static const struct file_operations epl_driver_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = device_file_ioctl, +}; + +static int epl_register_device(void) +{ + int result = 0; + struct class *dev_class; + + result = register_chrdev(0, device_name, &epl_driver_fops); + if (result < 0) { + pr_err("%s> register_chrdev code = %i\n", device_name, result); + return result; + } + device_file_major_number = result; + dev_class = class_create(THIS_MODULE, 
device_name); + if (dev_class == NULL) { + pr_err("%s> Could not create class for device\n", device_name); + goto class_fail; + } + + if ((device_create(dev_class, NULL, + MKDEV(device_file_major_number, 0), + NULL, device_name)) == NULL) { + pr_err("%s> Could not create device node\n", device_name); + goto device_failure; + } + return 0; + +device_failure: + class_destroy(dev_class); +class_fail: + unregister_chrdev(device_file_major_number, device_name); + return -1; +} + +static void epl_unregister_device(void) +{ + if (device_file_major_number != 0) + unregister_chrdev(device_file_major_number, device_name); +} + static int epl_client_probe(struct platform_device *pdev) { int ret = 0; @@ -336,6 +319,10 @@ static int epl_client_probe(struct platform_device *pdev) int iterator = 0; char name[32] = "client-misc-sw-generic-err"; + mutex_lock(&hs_state_mutex); + hs_state = HANDSHAKE_PENDING; + mutex_unlock(&hs_state_mutex); + epl_register_device(); ret = tegra_hsp_mb_init(dev); pdev_local = pdev; @@ -349,15 +336,15 @@ static int epl_client_probe(struct platform_device *pdev) /* Mapping registers to process address space */ miscerr_cfg[iterator].err_code_va = - ioremap(miscerr_cfg[iterator].err_code_phyaddr, MISC_REG_WIDTH); + devm_platform_ioremap_resource(pdev, (iterator * 2)); miscerr_cfg[iterator].err_assert_va = - ioremap(miscerr_cfg[iterator].err_assert_phy_addr, MISC_REG_WIDTH); + devm_platform_ioremap_resource(pdev, (iterator * 2) + 1); - if ((miscerr_cfg[iterator].err_code_va == NULL) || - (miscerr_cfg[iterator].err_assert_va == NULL)) { + if (IS_ERR(miscerr_cfg[iterator].err_code_va) || + IS_ERR(miscerr_cfg[iterator].err_assert_va)) { isAddrMappOk = false; ret = -1; - pr_info("epl: error in mapping misc err register for err #%d\n", + dev_err(&pdev->dev, "error in mapping misc err register for err #%d\n", iterator); } } else { @@ -365,11 +352,18 @@ static int epl_client_probe(struct platform_device *pdev) } } - mission_err_status_va = 
ioremap(MISC_EC_ERRSLICE0_MISSIONERR_STATUS_0, MISC_REG_WIDTH); - if (mission_err_status_va == NULL) { - ret = -1; + mission_err_status_va = devm_platform_ioremap_resource(pdev, NUM_SW_GENERIC_ERR * 2); + if (IS_ERR(mission_err_status_va)) { isAddrMappOk = false; - pr_info("epl: error in mapping mission error status register\n"); + dev_err(&pdev->dev, "error in mapping mission error status register\n"); + return PTR_ERR(mission_err_status_va); + } + + if (ret == 0) { + fsi_handshake_thread = kthread_run(epl_client_fsi_handshake, NULL, "fsi-hs"); + + if (IS_ERR(fsi_handshake_thread)) + return PTR_ERR(fsi_handshake_thread); } return ret; @@ -377,15 +371,7 @@ static int epl_client_probe(struct platform_device *pdev) static int epl_client_remove(struct platform_device *pdev) { - int iterator = 0; - epl_unregister_device(); - devm_kfree(&pdev->dev, epl_hsp_v); - iounmap(mission_err_status_va); - for (iterator = 0; iterator < NUM_SW_GENERIC_ERR; iterator++) { - iounmap(miscerr_cfg[iterator].err_code_va); - iounmap(miscerr_cfg[iterator].err_assert_va); - } return 0; } @@ -396,8 +382,8 @@ static struct platform_driver epl_client = { .of_match_table = of_match_ptr(epl_client_dt_match), .pm = pm_ptr(&epl_client_pm), }, - .probe = epl_client_probe, - .remove = epl_client_remove, + .probe = epl_client_probe, + .remove = epl_client_remove, }; module_platform_driver(epl_client); diff --git a/kernel/nvidia/drivers/platform/tegra/tegra-fsicom.c b/kernel/nvidia/drivers/platform/tegra/tegra-fsicom.c index aa311ed1e2..acfa96e52b 100644 --- a/kernel/nvidia/drivers/platform/tegra/tegra-fsicom.c +++ b/kernel/nvidia/drivers/platform/tegra/tegra-fsicom.c @@ -1,28 +1,6 @@ -/* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -/** - * @file tegra-fsicom.c - * @brief fsicom driver to communicate with fsicom daemon - * - * This file will register as client driver so that FSICOM daemon can - * utilize the smmu mapping and HSP drivers from kernel space - */ - -/* ==================[Includes]============================================= */ - -#include +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + #include #include #include @@ -33,13 +11,14 @@ #include #include #include -#include "linux/tegra-fsicom.h" +#include +#include -/*Timeout in millisec*/ -#define TIMEOUT 1000 +/* Timeout in milliseconds */ +#define TIMEOUT 5U -/* =================[Data types]======================================== */ +#define IOVA_UNI_CODE 0xFE0D /*Data type for mailbox client and channel details*/ struct fsi_hsp_sm { @@ -47,17 +26,13 @@ struct fsi_hsp_sm { struct mbox_chan *chan; }; -/*Data type for accessing TOP2 HSP */ +/* Data type for accessing TOP2 HSP */ struct fsi_hsp { struct fsi_hsp_sm rx; struct fsi_hsp_sm tx; struct device dev; }; -/* =================[GLOBAL variables]================================== */ -static ssize_t device_file_ioctl( - struct file *, unsigned int cmd, unsigned long arg); - static int device_file_major_number; static const char device_name[] = "fsicom-client"; @@ -71,65 +46,25 @@ static struct task_struct *task; static struct fsi_hsp *fsi_hsp_v; -/*File operations*/ -const static struct file_operations fsicom_driver_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = device_file_ioctl, -}; - -static int fsicom_register_device(void) -{ - int result = 0; - struct class *dev_class; - - result = register_chrdev(0, device_name, &fsicom_driver_fops); - 
if (result < 0) { - pr_err("%s> register_chrdev code = %i\n", device_name, result); - return result; - } - device_file_major_number = result; - dev_class = class_create(THIS_MODULE, "fsicom_client"); - if (dev_class == NULL) { - pr_err("%s> Could not create class for device\n", device_name); - goto class_fail; - } - - if ((device_create(dev_class, NULL, - MKDEV(device_file_major_number, 0), - NULL, "fsicom_client")) == NULL) { - pr_err("%s> Could not create device node\n", device_name); - goto device_failure; - } - return 0; - -device_failure: - class_destroy(dev_class); -class_fail: - unregister_chrdev(device_file_major_number, device_name); - return -1; -} - -static void fsicom_send_sgnal(int32_t data) +static void fsicom_send_signal(int sig, int32_t data) { - struct siginfo info; - /*Sending signal to app */ memset(&info, 0, sizeof(struct siginfo)); - info.si_signo = SIG_FSI_DAEMON; + info.si_signo = sig; info.si_code = SI_QUEUE; info.si_int = (u32) (unsigned long) data; - if (task != NULL) { - if (send_sig_info(SIG_FSI_DAEMON, - (struct kernel_siginfo *)&info, task) < 0) - pr_err("Unable to send signal\n"); - } + + /* Sending signal to app */ + if (task != NULL) + if (send_sig_info(sig, (struct kernel_siginfo *)&info, task) < 0) + pr_err("Unable to send signal %d\n", sig); } static void tegra_hsp_rx_notify(struct mbox_client *cl, void *msg) { - fsicom_send_sgnal(*((uint32_t *)msg)); + fsicom_send_signal(SIG_FSI_WRITE_EVENT, *((uint32_t *)msg)); } static void tegra_hsp_tx_empty_notify(struct mbox_client *cl, @@ -137,6 +72,7 @@ static void tegra_hsp_tx_empty_notify(struct mbox_client *cl, { pr_debug("TX empty callback came\n"); } + static int tegra_hsp_mb_init(struct device *dev) { int err; @@ -145,9 +81,8 @@ static int tegra_hsp_mb_init(struct device *dev) if (!fsi_hsp_v) return -ENOMEM; - if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) dev_err(dev, "FsiCom: setting DMA MASK failed!\n"); - } 
fsi_hsp_v->tx.client.dev = dev; fsi_hsp_v->rx.client.dev = dev; @@ -161,7 +96,6 @@ static int tegra_hsp_mb_init(struct device *dev) if (IS_ERR(fsi_hsp_v->tx.chan)) { err = PTR_ERR(fsi_hsp_v->tx.chan); dev_err(dev, "failed to get tx mailbox: %d\n", err); - devm_kfree(dev, fsi_hsp_v); return err; } @@ -170,20 +104,12 @@ static int tegra_hsp_mb_init(struct device *dev) if (IS_ERR(fsi_hsp_v->rx.chan)) { err = PTR_ERR(fsi_hsp_v->rx.chan); dev_err(dev, "failed to get rx mailbox: %d\n", err); - devm_kfree(dev, fsi_hsp_v); return err; } return 0; } - -static void fsicom_unregister_device(void) -{ - if (device_file_major_number != 0) - unregister_chrdev(device_file_major_number, device_name); -} - static ssize_t device_file_ioctl( struct file *fp, unsigned int cmd, unsigned long arg) { @@ -193,15 +119,16 @@ static ssize_t device_file_ioctl( struct rw_data *user_input; int ret = 0; uint32_t pdata[4] = {0}; + struct iova_data ldata; - user_input = (struct rw_data *)arg; - if (copy_from_user(&input, (void __user *)arg, - sizeof(struct rw_data))) - return -EACCES; switch (cmd) { case NVMAP_SMMU_MAP: + user_input = (struct rw_data *)arg; + if (copy_from_user(&input, (void __user *)arg, + sizeof(struct rw_data))) + return -EACCES; dmabuf = dma_buf_get(input.handle); if (IS_ERR_OR_NULL(dmabuf)) @@ -225,15 +152,32 @@ static ssize_t device_file_ioctl( if (copy_to_user((void __user *)&user_input->iova, (void *)&dma_addr, sizeof(uint64_t))) return -EACCES; + if (copy_to_user((void __user *)&user_input->dmabuf, + (void *)&dmabuf, sizeof(uint64_t))) + return -EACCES; + if (copy_to_user((void __user *)&user_input->attach, + (void *)&attach, sizeof(uint64_t))) + return -EACCES; + if (copy_to_user((void __user *)&user_input->sgt, + (void *)&sgt, sizeof(uint64_t))) + return -EACCES; + break; case NVMAP_SMMU_UNMAP: - dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); - dma_buf_detach(dmabuf, attach); - dma_buf_put(dmabuf); + if (copy_from_user(&input, (void __user *)arg, + 
sizeof(struct rw_data))) + return -EACCES; + dma_buf_unmap_attachment((struct dma_buf_attachment *)input.attach, + (struct sg_table *) input.sgt, DMA_BIDIRECTIONAL); + dma_buf_detach((struct dma_buf *)input.dmabuf, (struct dma_buf_attachment *) input.attach); + dma_buf_put((struct dma_buf *)input.dmabuf); break; case TEGRA_HSP_WRITE: + if (copy_from_user(&input, (void __user *)arg, + sizeof(struct rw_data))) + return -EACCES; pdata[0] = input.handle; ret = mbox_send_message(fsi_hsp_v->tx.chan, (void *)pdata); @@ -243,6 +187,18 @@ static ssize_t device_file_ioctl( task = get_current(); break; + case TEGRA_IOVA_DATA: + if (copy_from_user(&ldata, (void __user *)arg, + sizeof(struct iova_data))) + return -EACCES; + pdata[0] = ldata.offset; + pdata[1] = ldata.iova; + pdata[2] = ldata.chid; + pdata[3] = IOVA_UNI_CODE; + ret = mbox_send_message(fsi_hsp_v->tx.chan, + (void *)pdata); + break; + default: return -EINVAL; } @@ -250,6 +206,50 @@ static ssize_t device_file_ioctl( return ret; } +/* File operations */ +static const struct file_operations fsicom_driver_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = device_file_ioctl, +}; + +static int fsicom_register_device(void) +{ + int result = 0; + struct class *dev_class; + + result = register_chrdev(0, device_name, &fsicom_driver_fops); + if (result < 0) { + pr_err("%s> register_chrdev code = %i\n", device_name, result); + return result; + } + device_file_major_number = result; + dev_class = class_create(THIS_MODULE, "fsicom_client"); + if (dev_class == NULL) { + pr_err("%s> Could not create class for device\n", device_name); + goto class_fail; + } + + if ((device_create(dev_class, NULL, + MKDEV(device_file_major_number, 0), + NULL, "fsicom_client")) == NULL) { + pr_err("%s> Could not create device node\n", device_name); + goto device_failure; + } + return 0; + +device_failure: + class_destroy(dev_class); +class_fail: + unregister_chrdev(device_file_major_number, device_name); + return -1; +} + +static void 
fsicom_unregister_device(void) +{ + if (device_file_major_number != 0) + unregister_chrdev(device_file_major_number, device_name); +} + static const struct of_device_id fsicom_client_dt_match[] = { { .compatible = "nvidia,tegra234-fsicom-client"}, {} @@ -271,15 +271,31 @@ static int fsicom_client_probe(struct platform_device *pdev) static int fsicom_client_remove(struct platform_device *pdev) { fsicom_unregister_device(); - devm_kfree(&pdev->dev, fsi_hsp_v); return 0; } +static int __maybe_unused fsicom_client_suspend(struct device *dev) +{ + dev_dbg(dev, "suspend called\n"); + return 0; +} + +static int __maybe_unused fsicom_client_resume(struct device *dev) +{ + dev_dbg(dev, "resume called\n"); + + fsicom_send_signal(SIG_DRIVER_RESUME, 0); + return 0; +} + +static SIMPLE_DEV_PM_OPS(fsicom_client_pm, fsicom_client_suspend, fsicom_client_resume); + static struct platform_driver fsicom_client = { .driver = { .name = "fsicom_client", .probe_type = PROBE_PREFER_ASYNCHRONOUS, .of_match_table = of_match_ptr(fsicom_client_dt_match), + .pm = pm_ptr(&fsicom_client_pm), }, .probe = fsicom_client_probe, .remove = fsicom_client_remove, diff --git a/kernel/nvidia/drivers/video/tegra/host/nvdla/dla_os_interface.h b/kernel/nvidia/drivers/video/tegra/host/nvdla/dla_os_interface.h index 8e7601b97d..cc340bf844 100644 --- a/kernel/nvidia/drivers/video/tegra/host/nvdla/dla_os_interface.h +++ b/kernel/nvidia/drivers/video/tegra/host/nvdla/dla_os_interface.h @@ -1,7 +1,7 @@ /* * NVDLA OS Interface * - * Copyright (c) 2016-2021, NVIDIA Corporation. All rights reserved. + * Copyright (c) 2016-2022, NVIDIA Corporation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -141,39 +141,47 @@ enum dla_commands { /** * Used for testing communication between CCPLEX and DLA */ - DLA_CMD_PING = 1U, - DLA_CMD_GET_STATUS_UNUSED = 2U, - DLA_CMD_RESET_UNUSED = 3U, - DLA_CMD_DLA_CONTROL_UNUSED = 4U, + DLA_CMD_PING = 1U, + DLA_CMD_GET_STATUS_UNUSED = 2U, + DLA_CMD_RESET_UNUSED = 3U, + DLA_CMD_DLA_CONTROL_UNUSED = 4U, DLA_CMD_GET_QUEUE_STATUS_UNUSED = 5U, - DLA_CMD_GET_STATISTICS_UNUSED = 6U, + DLA_CMD_GET_STATISTICS = 6U, /** * Submit task to DLA */ - DLA_CMD_SUBMIT_TASK = 7U, + DLA_CMD_SUBMIT_TASK = 7U, DLA_CMD_SET_SCHEDULER_UNUSED = 8U, - DLA_CMD_READ_INFO_UNUSED = 9U, + DLA_CMD_READ_INFO_UNUSED = 9U, /** * Set various debugging parameters (trace/printf/crashdump) * Only enabled in Debug build. */ - DLA_CMD_SET_DEBUG = 10U, + DLA_CMD_SET_DEBUG = 10U, /** * Set the address & size of various regions used for various reasons */ - DLA_CMD_SET_REGIONS = 11U, + DLA_CMD_SET_REGIONS = 11U, /** * Suspend processing a queue */ - DLA_CMD_QUEUE_SUSPEND = 12U, + DLA_CMD_QUEUE_SUSPEND = 12U, /** * Resume processing a queue */ - DLA_CMD_QUEUE_RESUME = 13U, + DLA_CMD_QUEUE_RESUME = 13U, /** * Flushes a queue */ - DLA_CMD_QUEUE_FLUSH = 14U, + DLA_CMD_QUEUE_FLUSH = 14U, + /** + * Sets stat window size + */ + DLA_CMD_SET_STAT_WINDOW_SIZE = 15U, + /** + * Gets stat window size + */ + DLA_CMD_GET_STAT_WINDOW_SIZE = 16U, }; /** diff --git a/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla.c b/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla.c index 1a45786847..da63dd414b 100644 --- a/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla.c +++ b/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla.c @@ -592,6 +592,78 @@ int nvhost_nvdla_prepare_poweroff(struct platform_device *pdev) return ret; } +/* Free utilization rate memory */ +void nvdla_free_utilization_rate_memory(struct platform_device *pdev) +{ + struct 
nvhost_device_data *pdata = platform_get_drvdata(pdev); + struct nvdla_device *nvdla_dev = pdata->private_data; + + if (nvdla_dev->utilization_mem_pa) { + dma_free_attrs(&pdev->dev, sizeof(unsigned int), + nvdla_dev->utilization_mem_va, + nvdla_dev->utilization_mem_pa, + 0); + nvdla_dev->utilization_mem_va = NULL; + nvdla_dev->utilization_mem_pa = 0; + } +} + +/* Allocate memory to store the resource utilization rate */ +int nvdla_alloc_utilization_rate_memory(struct platform_device *pdev) +{ + struct nvhost_device_data *pdata = platform_get_drvdata(pdev); + struct nvdla_device *nvdla_dev = pdata->private_data; + int err = 0; + + /* allocate memory for utilization rate */ + nvdla_dev->utilization_mem_va = dma_alloc_attrs(&pdev->dev, + sizeof(unsigned int), &nvdla_dev->utilization_mem_pa, + GFP_KERNEL, 0); + + if (nvdla_dev->utilization_mem_va == NULL) { + nvdla_dbg_err(pdev, "utilization rate dma alloc failed"); + err = -ENOMEM; + } + + return err; +} + +/* Free window size memory */ +void nvdla_free_window_size_memory(struct platform_device *pdev) +{ + struct nvhost_device_data *pdata = platform_get_drvdata(pdev); + struct nvdla_device *nvdla_dev = pdata->private_data; + + if (nvdla_dev->window_mem_pa) { + dma_free_attrs(&pdev->dev, sizeof(unsigned int), + nvdla_dev->window_mem_va, + nvdla_dev->window_mem_pa, + 0); + nvdla_dev->window_mem_va = NULL; + nvdla_dev->window_mem_pa = 0; + } +} + +/* Allocate memory to store the window size for which the utilization rate is computed */ +int nvdla_alloc_window_size_memory(struct platform_device *pdev) +{ + struct nvhost_device_data *pdata = platform_get_drvdata(pdev); + struct nvdla_device *nvdla_dev = pdata->private_data; + int err = 0; + + /* allocate memory for window_size */ + nvdla_dev->window_mem_va = dma_alloc_attrs(&pdev->dev, + sizeof(unsigned int), &nvdla_dev->window_mem_pa, + GFP_KERNEL, 0); + + if (nvdla_dev->window_mem_va == NULL) { + nvdla_dbg_err(pdev, "window size dma alloc failed"); + err = -ENOMEM; + } + 
+ return err; +} + #ifdef CONFIG_TEGRA_SOC_HWPM static int nvdla_hwpm_ip_pm(void *ip_dev, bool disable) { @@ -838,6 +910,7 @@ static int nvdla_probe(struct platform_device *pdev) mutex_init(&pdata->lock); mutex_init(&nvdla_dev->cmd_lock); init_completion(&nvdla_dev->cmd_completion); + mutex_init(&nvdla_dev->ping_lock); pdata->private_data = nvdla_dev; platform_set_drvdata(pdev, pdata); nvdla_dev->dbg_mask = debug_err; @@ -878,6 +951,14 @@ static int nvdla_probe(struct platform_device *pdev) if (err) goto err_alloc_cmd_mem; + err = nvdla_alloc_utilization_rate_memory(pdev); + if (err) + goto err_alloc_utilization_rate_mem; + + err = nvdla_alloc_window_size_memory(pdev); + if (err) + goto err_alloc_window_size_mem; + #ifdef CONFIG_TEGRA_SOC_HWPM nvdla_dbg_info(pdev, "hwpm ip %s register", pdev->name); hwpm_ip_ops.ip_dev = (void *)pdev; @@ -891,6 +972,11 @@ static int nvdla_probe(struct platform_device *pdev) nvdla_dbg_info(pdev, "pdata:%p initialized\n", pdata); return 0; + +err_alloc_window_size_mem: + nvdla_free_utilization_rate_memory(pdev); +err_alloc_utilization_rate_mem: + nvdla_free_cmd_memory(pdev); err_alloc_cmd_mem: nvhost_syncpt_unit_interface_deinit(pdev); err_mss_init: @@ -901,6 +987,7 @@ static int nvdla_probe(struct platform_device *pdev) nvhost_module_deinit(pdev); err_module_init: err_get_resources: + mutex_destroy(&nvdla_dev->ping_lock); devm_kfree(dev, nvdla_dev); err_alloc_nvdla: err_no_ip: @@ -930,7 +1017,7 @@ static int __exit nvdla_remove(struct platform_device *pdev) nvdla_queue_deinit(nvdla_dev->pool); nvhost_client_device_release(pdev); nvhost_module_deinit(pdev); - + mutex_destroy(&nvdla_dev->ping_lock); nvdla_free_gcov_region(pdev, false); if (nvdla_dev->trace_dump_pa) { @@ -951,6 +1038,9 @@ static int __exit nvdla_remove(struct platform_device *pdev) nvdla_dev->debug_dump_pa = 0; } + nvdla_free_utilization_rate_memory(pdev); + nvdla_free_window_size_memory(pdev); + /* free command mem in last */ nvdla_free_cmd_memory(pdev); diff --git 
a/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla.h b/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla.h index c27ccb31c2..6ecae35286 100644 --- a/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla.h +++ b/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla.h @@ -225,24 +225,29 @@ enum nvdla_submit_mode { /** * data structure to keep per DLA engine device data * - * @pdev pointer to platform device - * @pool pointer to queue table - * @dbg_mask debug mask for print level - * @en_trace flag to enable kernel tracing - * @submit_mode flag to enable task submit mode, default is - * NVDLA_SUBMIT_MODE_MMIO - * @fw_version saves current firmware version - * @cmd_mem structure to hold command memory pool - * @trace_enable to enable/disable the DLA firmware trace - * @events_mask mask to set/reset the different DLA firmware trace event - * @debug_dump_pa physical address of print buffer - * @debug_dump_va virtual address of print buffer - * @trace_dump_pa physical address of trace buffer - * @trace_dump_va virtual address of trace buffer - * @en_fw_gcov flag to enable firmware gcov - * @gcov_dump_pa physical address of fw gcov buffer - * @gcov_dump_va virtual address of fw gcovbuffer + * @pdev pointer to platform device + * @pool pointer to queue table + * @dbg_mask debug mask for print level + * @en_trace flag to enable kernel tracing + * @submit_mode flag to enable task submit mode, default is + * NVDLA_SUBMIT_MODE_MMIO + * @fw_version saves current firmware version + * @cmd_mem structure to hold command memory pool + * @trace_enable to enable/disable the DLA firmware trace + * @events_mask mask to set/reset the different DLA firmware trace event + * @debug_dump_pa physical address of print buffer + * @debug_dump_va virtual address of print buffer + * @trace_dump_pa physical address of trace buffer + * @trace_dump_va virtual address of trace buffer + * @en_fw_gcov flag to enable firmware gcov + * @gcov_dump_pa physical address of fw gcov buffer + * @gcov_dump_va 
virtual address of fw gcovbuffer + * @utilization_mem_pa physical address of resource utilization buffer + * @utilization_mem_va virtual address of resource utilization buffer + * @window_mem_pa physical address of window size buffer + * @window_mem_va virtual address of window size buffer * @is_suspended flag to check if module is in suspend state. + * @ping_lock lock to synchronize the ping operation requests. */ struct nvdla_device { struct platform_device *pdev; @@ -266,9 +271,14 @@ struct nvdla_device { dma_addr_t gcov_dump_pa; u32 *gcov_dump_va; struct work_struct reset_work; + dma_addr_t utilization_mem_pa; + u32 *utilization_mem_va; + dma_addr_t window_mem_pa; + u32 *window_mem_va; #ifdef CONFIG_PM bool is_suspended; #endif + struct mutex ping_lock; }; /** diff --git a/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla_debug.c b/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla_debug.c index 5b65e93a9e..bab19e242b 100644 --- a/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla_debug.c +++ b/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla_debug.c @@ -288,6 +288,240 @@ static int debug_dla_fw_gcov_gcda_show(struct seq_file *s, void *data) return 0; } +static int nvdla_get_stats(struct nvdla_device *nvdla_dev) +{ + int err = 0; + struct nvdla_cmd_data cmd_data; + struct platform_device *pdev; + + /* prepare command data */ + cmd_data.method_id = DLA_CMD_GET_STATISTICS; + cmd_data.method_data = ALIGNED_DMA(nvdla_dev->utilization_mem_pa); + cmd_data.wait = true; + + pdev = nvdla_dev->pdev; + if (pdev == NULL) + return -EFAULT; + + /* pass set debug command to falcon */ + err = nvdla_send_cmd(pdev, &cmd_data); + if (err != 0) + nvdla_dbg_err(pdev, "failed to send get stats command"); + + return err; +} + +static int debug_dla_fw_resource_util_show(struct seq_file *s, void *data) +{ + int err; + struct nvdla_device *nvdla_dev; + struct platform_device *pdev; + + unsigned int utilization, util_rate_characteristic, util_rate_mantissa; + + if (s == NULL) { + err 
= -EFAULT; + goto fail_no_dev; + } + + nvdla_dev = (struct nvdla_device *) s->private; + if (nvdla_dev == NULL) { + err = -EFAULT; + goto fail_no_dev; + } + + pdev = nvdla_dev->pdev; + if (pdev == NULL) { + err = -EFAULT; + goto fail_no_dev; + } + + /* make sure that device is powered on */ + err = nvhost_module_busy(pdev); + if (err != 0) { + nvdla_dbg_err(pdev, "failed to power on\n"); + err = -ENODEV; + goto fail_no_dev; + } + + err = nvdla_get_stats(nvdla_dev); + if (err != 0) { + nvdla_dbg_err(pdev, "Failed to send get stats command"); + goto fail_to_send_cmd; + } + + utilization = *(unsigned int *)nvdla_dev->utilization_mem_va; + util_rate_characteristic = (utilization / 10000); + util_rate_mantissa = (utilization % 10000); + + seq_printf(s, "%u.%04u\n", util_rate_characteristic, util_rate_mantissa); + +fail_to_send_cmd: + nvhost_module_idle(pdev); +fail_no_dev: + return err; +} + +static int nvdla_get_window_size(struct nvdla_device *nvdla_dev) +{ + int err = 0; + struct nvdla_cmd_data cmd_data; + struct platform_device *pdev; + + /* prepare command data */ + cmd_data.method_id = DLA_CMD_GET_STAT_WINDOW_SIZE; + cmd_data.method_data = ALIGNED_DMA(nvdla_dev->window_mem_pa); + cmd_data.wait = true; + + pdev = nvdla_dev->pdev; + if (pdev == NULL) { + err = -EFAULT; + goto fail_no_dev; + } + + /* make sure that device is powered on */ + err = nvhost_module_busy(pdev); + if (err != 0) { + nvdla_dbg_err(pdev, "failed to power on\n"); + err = -ENODEV; + goto fail_no_dev; + } + + /* pass set debug command to falcon */ + err = nvdla_send_cmd(pdev, &cmd_data); + if (err != 0) { + nvdla_dbg_err(pdev, "failed to send set window command"); + goto fail_to_send_cmd; + } + +fail_to_send_cmd: + nvhost_module_idle(pdev); +fail_no_dev: + return err; +} + +static int debug_dla_fw_stat_window_show(struct seq_file *s, void *data) +{ + int err; + struct nvdla_device *nvdla_dev; + struct platform_device *pdev; + + if (s == NULL) { + err = -EFAULT; + goto fail; + } + + nvdla_dev = 
(struct nvdla_device *) s->private; + if (nvdla_dev == NULL) { + err = -EFAULT; + goto fail; + } + + pdev = nvdla_dev->pdev; + if (pdev == NULL) { + err = -EFAULT; + goto fail; + } + + err = nvdla_get_window_size(nvdla_dev); + if (err != 0) { + nvdla_dbg_err(pdev, "Failed to get window size"); + goto fail; + } + + seq_printf(s, "%u\n", *(unsigned int *)nvdla_dev->window_mem_va); + + return 0; + +fail: + return err; +} + +/* + * When the user calls this debugfs node, the configurable + * window size value is passed down to the FW + */ +static int nvdla_set_window_size(struct nvdla_device *nvdla_dev) +{ + int err = 0; + struct nvdla_cmd_data cmd_data; + struct platform_device *pdev; + + /* prepare command data */ + cmd_data.method_id = DLA_CMD_SET_STAT_WINDOW_SIZE; + cmd_data.method_data = ALIGNED_DMA(nvdla_dev->window_mem_pa); + cmd_data.wait = true; + + pdev = nvdla_dev->pdev; + if (pdev == NULL) { + err = -EFAULT; + goto fail_no_dev; + } + + /* make sure that device is powered on */ + err = nvhost_module_busy(pdev); + if (err != 0) { + nvdla_dbg_err(pdev, "failed to power on\n"); + err = -ENODEV; + goto fail_no_dev; + } + + /* pass set debug command to falcon */ + err = nvdla_send_cmd(pdev, &cmd_data); + if (err != 0) { + nvdla_dbg_err(pdev, "failed to send set window command"); + goto fail_to_send_cmd; + } + +fail_to_send_cmd: + nvhost_module_idle(pdev); +fail_no_dev: + return err; +} + +static ssize_t debug_dla_fw_stat_window_write(struct file *file, + const char __user *buffer, size_t count, loff_t *off) +{ + int err; + struct seq_file *priv_data; + struct nvdla_device *nvdla_dev; + struct platform_device *pdev; + long write_value; + u32 *window_va; + + /* Fetch user requested write-value. 
*/ + err = kstrtol_from_user(buffer, count, 10, &write_value); + if (err < 0) + goto fail; + + priv_data = file->private_data; + if (priv_data == NULL) + goto fail; + + nvdla_dev = (struct nvdla_device *) priv_data->private; + if (nvdla_dev == NULL) + goto fail; + + pdev = nvdla_dev->pdev; + if (pdev == NULL) + goto fail; + + window_va = nvdla_dev->window_mem_va; + if (write_value < UINT_MAX) + *window_va = write_value; + + err = nvdla_set_window_size(nvdla_dev); + if (err != 0) { + nvdla_dbg_err(pdev, "Failed to send set window size command"); + goto fail; + } + + return count; + +fail: + return -1; +} + static int debug_dla_enable_trace_open(struct inode *inode, struct file *file) { return single_open(file, debug_dla_enable_trace_show, inode->i_private); @@ -323,6 +557,16 @@ static int debug_dla_fw_gcov_gcda_open(struct inode *inode, struct file *file) return single_open(file, debug_dla_fw_gcov_gcda_show, inode->i_private); } +static int debug_dla_fw_resource_util_open(struct inode *inode, struct file *file) +{ + return single_open(file, debug_dla_fw_resource_util_show, inode->i_private); +} + +static int debug_dla_fw_stat_window_open(struct inode *inode, struct file *file) +{ + return single_open(file, debug_dla_fw_stat_window_show, inode->i_private); +} + static int debug_set_trace_event_config(struct platform_device *pdev, u32 value, u32 sub_cmd) { @@ -610,6 +854,21 @@ static const struct file_operations nvdla_fw_reload_fops = { .write = debug_dla_fw_reload_set, }; +static const struct file_operations debug_dla_resource_util_fops = { + .open = debug_dla_fw_resource_util_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations debug_dla_stat_window_fops = { + .open = debug_dla_fw_stat_window_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = debug_dla_fw_stat_window_write, +}; + static void dla_fw_debugfs_init(struct platform_device *pdev) { struct dentry 
*fw_dir, *fw_trace, *events, *fw_gcov; @@ -674,6 +933,14 @@ static void dla_fw_debugfs_init(struct platform_device *pdev) nvdla_dev, &debug_dla_fw_gcov_gcda_fops)) goto gcov_failed; + if (!debugfs_create_file("utilization_rate", S_IRUSR, fw_dir, + nvdla_dev, &debug_dla_resource_util_fops)) + goto trace_failed; + + if (!debugfs_create_file("stat_window_size", S_IRUSR | S_IWUSR, fw_dir, + nvdla_dev, &debug_dla_stat_window_fops)) + goto trace_failed; + return; gcov_failed: diff --git a/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla_ioctl.c b/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla_ioctl.c index 75278ef4ac..d70c9f596a 100644 --- a/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla_ioctl.c +++ b/kernel/nvidia/drivers/video/tegra/host/nvdla/nvdla_ioctl.c @@ -258,6 +258,8 @@ static int nvdla_ping(struct platform_device *pdev, { struct nvdla_cmd_mem_info ping_cmd_mem_info; struct nvdla_cmd_data cmd_data; + struct nvhost_device_data *pdata = platform_get_drvdata(pdev); + struct nvdla_device *nvdla_dev = pdata->private_data; u32 *ping_va; int err = 0; @@ -275,6 +277,14 @@ static int nvdla_ping(struct platform_device *pdev, goto fail_to_on; } + if (nvdla_dev == NULL) { + nvdla_dbg_err(pdev, "Invalid nvdla device\n"); + err = -EINVAL; + goto fail_to_get_nvdla_dev; + } + + mutex_lock(&nvdla_dev->ping_lock); + /* assign ping cmd buffer */ err = nvdla_get_cmd_memory(pdev, &ping_cmd_mem_info); if (err) { @@ -313,6 +323,8 @@ static int nvdla_ping(struct platform_device *pdev, fail_cmd: nvdla_put_cmd_memory(pdev, ping_cmd_mem_info.index); fail_to_alloc: + mutex_unlock(&nvdla_dev->ping_lock); +fail_to_get_nvdla_dev: nvhost_module_idle(pdev); fail_to_on: fail_to_get_val_arg: diff --git a/kernel/nvidia/drivers/video/tegra/host/pva/nvpva_syncpt.c b/kernel/nvidia/drivers/video/tegra/host/pva/nvpva_syncpt.c index 3ca3b3bb75..03744f493e 100644 --- a/kernel/nvidia/drivers/video/tegra/host/pva/nvpva_syncpt.c +++ b/kernel/nvidia/drivers/video/tegra/host/pva/nvpva_syncpt.c @@ 
-19,7 +19,7 @@ #include #include #include -#include +#include #include #include "pva.h" diff --git a/kernel/nvidia/drivers/video/tegra/host/t194/t194.c b/kernel/nvidia/drivers/video/tegra/host/t194/t194.c index aed3a546d3..53ab851b9c 100644 --- a/kernel/nvidia/drivers/video/tegra/host/t194/t194.c +++ b/kernel/nvidia/drivers/video/tegra/host/t194/t194.c @@ -572,8 +572,6 @@ static void t194_init_regs(struct platform_device *pdev, bool prod) { struct nvhost_gating_register *cg_regs = t19x_host1x_gating_registers; struct nvhost_streamid_mapping *map_regs = t19x_host1x_streamid_mapping; - ktime_t now, start = ktime_get(); - u32 ram_init; int ret = 0; u64 cl = 0; @@ -581,33 +579,6 @@ static void t194_init_regs(struct platform_device *pdev, bool prod) return; } - /* Ensure that HW has finished initializing syncpt RAM prior to use */ - for (;;) { - /* XXX: This retry loop takes too long to timeout on VDK */ - if (tegra_platform_is_sim()) { - pr_info("%s: Skipping ram_init done check on sim.\n", - __func__); - break; - } - - ram_init = host1x_hypervisor_readl(pdev, - host1x_sync_syncpt_ram_init_0_r()); - if (!host1x_sync_syncpt_ram_init_0_ram_init_v(ram_init)) { - pr_info("%s: Host1x HW syncpt ram init disabled\n", - __func__); - break; - } - if (host1x_sync_syncpt_ram_init_0_ram_init_done_v(ram_init)) - break; - - now = ktime_get(); - if (ktime_ms_delta(now, start) >= SYNCPT_RAM_INIT_TIMEOUT_MS) { - pr_err("%s: Timed out waiting for syncpt ram init!\n", - __func__); - break; - } - } - /* Use old mapping registers on older simulator CLs */ ret = of_property_read_u64(pdev->dev.of_node, "nvidia,changelist", diff --git a/kernel/nvidia/drivers/video/tegra/host/t23x/t23x.c b/kernel/nvidia/drivers/video/tegra/host/t23x/t23x.c index 740fd91d2a..0ec07d876b 100644 --- a/kernel/nvidia/drivers/video/tegra/host/t23x/t23x.c +++ b/kernel/nvidia/drivers/video/tegra/host/t23x/t23x.c @@ -529,40 +529,11 @@ static void t23x_remove_support(struct nvhost_chip_support *op) static void 
t23x_init_gating_regs(struct platform_device *pdev, bool prod) { struct nvhost_gating_register *cg_regs = t23x_host1x_gating_registers; - ktime_t now, start = ktime_get(); - u32 ram_init; if (nvhost_dev_is_virtual(pdev) == true) { return; } - /* Ensure that HW has finished initializing syncpt RAM prior to use */ - for (;;) { - /* XXX: This retry loop takes too long to timeout on VDK */ - if (tegra_platform_is_sim()) { - pr_info("%s: Skipping ram_init done check on sim.\n", - __func__); - break; - } - - ram_init = host1x_hypervisor_readl(pdev, - host1x_sync_syncpt_ram_init_0_r()); - if (!host1x_sync_syncpt_ram_init_0_ram_init_v(ram_init)) { - pr_info("%s: Host1x HW syncpt ram init disabled\n", - __func__); - break; - } - if (host1x_sync_syncpt_ram_init_0_ram_init_done_v(ram_init)) - break; - - now = ktime_get(); - if (ktime_ms_delta(now, start) >= SYNCPT_RAM_INIT_TIMEOUT_MS) { - pr_err("%s: Timed out waiting for syncpt ram init!\n", - __func__); - break; - } - } - while (cg_regs->addr) { u32 val = prod ? cg_regs->prod : cg_regs->disable; diff --git a/kernel/nvidia/include/linux/tegra-epl.h b/kernel/nvidia/include/linux/tegra-epl.h index 37803fb5cb..a119fda3b2 100644 --- a/kernel/nvidia/include/linux/tegra-epl.h +++ b/kernel/nvidia/include/linux/tegra-epl.h @@ -1,31 +1,12 @@ -/* - * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* SPDX-License-Identifier: GPL-2.0 * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -/** - * @file tegra-epl.h - * @brief epl driver header file - * - * This file will expose API prototype for epl kernel - * space APIs. + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ +#ifndef _TEGRA_EPL_H_ +#define _TEGRA_EPL_H_ -#ifndef TEGRA_EPL_H -#define TEGRA_EPL_H - -/* ==================[Includes]============================================= */ - -/* ==================[MACROS]=============================================== */ +#include /** * @brief Error report frame @@ -117,6 +98,7 @@ int epl_report_error(struct epl_error_report_frame error_report) { return -ENODEV; } -#endif /* CONFIG_TEGRA_EPL */ +#endif /* CONFIG_TEGRA_EPL */ + +#endif /* _TEGRA_EPL_H_ */ -#endif /* TEGRA_EPL_H */ diff --git a/kernel/nvidia/include/uapi/linux/nvhost_nvdla_ioctl.h b/kernel/nvidia/include/uapi/linux/nvhost_nvdla_ioctl.h index 32d9552c0a..a14d344b81 100644 --- a/kernel/nvidia/include/uapi/linux/nvhost_nvdla_ioctl.h +++ b/kernel/nvidia/include/uapi/linux/nvhost_nvdla_ioctl.h @@ -36,7 +36,7 @@ #define MAX_NVDLA_EMU_PREFENCES_PER_TASK 16 #define MAX_NVDLA_EMU_POSTFENCES_PER_TASK 16 #define MAX_NVDLA_IN_STATUS_PER_TASK MAX_NVDLA_PREFENCES_PER_TASK -#define MAX_NVDLA_OUT_STATUS_PER_TASK MAX_NVDLA_POSTFENCES_PER_TASK +#define MAX_NVDLA_OUT_STATUS_PER_TASK 36 #define MAX_NVDLA_OUT_TIMESTAMPS_PER_TASK 32 /** diff --git a/kernel/nvidia/include/uapi/linux/tegra-epl.h b/kernel/nvidia/include/uapi/linux/tegra-epl.h index 05db2afdbf..01137affad 100644 --- a/kernel/nvidia/include/uapi/linux/tegra-epl.h +++ b/kernel/nvidia/include/uapi/linux/tegra-epl.h @@ -1,36 +1,15 @@ -/* - * Copyright (c) 2021, NVIDIA CORPORATION, All rights reserved. +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -/** - * @file tegra-epl.h - * @brief epl driver header file - * - * This file will expose the data types and macros for making ioctl call - * from user space EPL library. + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ - -#ifndef EPL_CLIENT_IOCTL_H -#define EPL_CLIENT_IOCTL_H - -/* ==================[Includes]============================================= */ +#ifndef _UAPI_TEGRA_EPL_H_ +#define _UAPI_TEGRA_EPL_H_ #include -/* ==================[MACROS]=============================================== */ - -/*ioctl call macros*/ +/* ioctl call macros */ #define EPL_REPORT_ERROR_CMD _IOWR('q', 1, uint8_t *) -#endif /* EPL_CLIENT_IOCTL_H */ +#endif /* _UAPI_TEGRA_EPL_H_ */ + diff --git a/kernel/nvidia/include/uapi/linux/tegra-fsicom.h b/kernel/nvidia/include/uapi/linux/tegra-fsicom.h new file mode 100644 index 0000000000..ffc891547d --- /dev/null +++ b/kernel/nvidia/include/uapi/linux/tegra-fsicom.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) + * + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ */ + +#ifndef _UAPI_TEGRA_FSICOM_H_ +#define _UAPI_TEGRA_FSICOM_H_ + +#include + +struct rw_data { + uint32_t handle; + uint64_t pa; + uint64_t iova; + uint64_t dmabuf; + uint64_t attach; + uint64_t sgt; +}; + +/*Data type for sending the offset,IOVA and channel Id details to FSI */ +struct iova_data { + uint32_t offset; + uint32_t iova; + uint32_t chid; +}; + +/* signal value */ +#define SIG_DRIVER_RESUME 43 +#define SIG_FSI_WRITE_EVENT 44 + +/* ioctl call macros */ +#define NVMAP_SMMU_MAP _IOWR('q', 1, struct rw_data *) +#define NVMAP_SMMU_UNMAP _IOWR('q', 2, struct rw_data *) +#define TEGRA_HSP_WRITE _IOWR('q', 3, struct rw_data *) +#define TEGRA_SIGNAL_REG _IOWR('q', 4, struct rw_data *) +#define TEGRA_IOVA_DATA _IOWR('q', 5, struct iova_data *) + +#endif /* _UAPI_TEGRA_FSICOM_H_ */ diff --git a/kernel/nvidia/include/uapi/linux/tegra_profiler.h b/kernel/nvidia/include/uapi/linux/tegra_profiler.h index 5a7abb5a83..23761232b3 100644 --- a/kernel/nvidia/include/uapi/linux/tegra_profiler.h +++ b/kernel/nvidia/include/uapi/linux/tegra_profiler.h @@ -1,7 +1,7 @@ /* * include/uapi/linux/tegra_profiler.h * - * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2013-2023, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -20,8 +20,8 @@ #include #include -#define QUADD_SAMPLES_VERSION 50 -#define QUADD_IO_VERSION 29 +#define QUADD_SAMPLES_VERSION 51 +#define QUADD_IO_VERSION 30 #define QUADD_IO_VERSION_DYNAMIC_RB 5 #define QUADD_IO_VERSION_RB_MAX_FILL_COUNT 6 @@ -48,6 +48,7 @@ #define QUADD_IO_VERSION_SAMPLING_CNTRL 27 #define QUADD_IO_VERSION_UNCORE_EVENTS 28 #define QUADD_IO_VERSION_EVENT_FILTER 29 +#define QUADD_IO_VERSION_CPUS_UINT 30 #define QUADD_SAMPLE_VERSION_THUMB_MODE_FLAG 17 #define QUADD_SAMPLE_VERSION_GROUP_SAMPLES 18 @@ -81,6 +82,7 @@ #define QUADD_SAMPLE_VERSION_UNCORE_EVENTS 48 #define QUADD_SAMPLE_VERSION_SEQID 49 #define QUADD_SAMPLE_VERSION_EVENT_FILTER 50 +#define QUADD_SAMPLE_VERSION_CPUS_UINT 51 #define QUADD_MMAP_HEADER_VERSION 1 @@ -264,7 +266,7 @@ struct quadd_sample_data { __u32 tgid; __u64 time; - __u8 cpu_id; + __u32 cpu_id; __u32 flags; __u8 callchain_nr; @@ -279,7 +281,7 @@ struct quadd_mmap_data { __u32 pid; __u64 time; - __u8 cpu_id; + __u32 cpu_id; __u16 flags; __u64 addr; @@ -336,26 +338,11 @@ struct quadd_sched_data { __u32 tgid; __u64 time; - __u8 cpu_id; + __u32 cpu_id; __u64 flags; __u16 task_state; }; -enum { - QM_DEBUG_SAMPLE_TYPE_SCHED_IN = 1, - QM_DEBUG_SAMPLE_TYPE_SCHED_OUT, - - QM_DEBUG_SAMPLE_TYPE_TIMER_HANDLE, - QM_DEBUG_SAMPLE_TYPE_TIMER_START, - QM_DEBUG_SAMPLE_TYPE_TIMER_CANCEL, - QM_DEBUG_SAMPLE_TYPE_TIMER_FORWARD, - - QM_DEBUG_SAMPLE_TYPE_READ_COUNTER, - - QM_DEBUG_SAMPLE_TYPE_SOURCE_START, - QM_DEBUG_SAMPLE_TYPE_SOURCE_STOP, -}; - #define QUADD_COMM_FLAG_EXEC (1U << 0) struct quadd_comm_data { @@ -367,22 +354,6 @@ struct quadd_comm_data { __u32 flags; }; -struct quadd_debug_data { - __u8 type; - - __u32 pid; - __u64 time; - - __u16 cpu:6, - user_mode:1, - lp_mode:1, - thumb_mode:1, - reserved:7; - - __u32 extra_value[2]; - __u16 extra_length; -}; - #define QUADD_HEADER_MAGIC 0x1122 
#define QUADD_HDR_FLAG_BACKTRACE (1ULL << 0) @@ -416,7 +387,7 @@ struct quadd_header_data { __u16 samples_version; __u16 io_version; - __u8 cpu_id; + __u32 cpu_id; __u64 flags; __u32 freq; @@ -437,7 +408,6 @@ struct quadd_record_data { struct quadd_sample_data sample; struct quadd_mmap_data mmap; struct quadd_ma_data ma; - struct quadd_debug_data debug; struct quadd_header_data hdr; struct quadd_power_rate_data power_rate; struct quadd_hotplug_data hotplug; @@ -579,7 +549,7 @@ struct quadd_comm_cap_for_cpu { __u32 l2_cache:1, l2_multiple_events:1; - __u8 cpuid; + __u32 cpuid; struct quadd_events_cap events_cap; };